Merge remote-tracking branch 'origin/dev' into api-v1.9.5

# Conflicts:
#	backend/cmd/storage/main.go
#	ee/api/app.py
#	ee/api/routers/core_dynamic.py
#	ee/api/routers/ee.py
#	ee/backend/pkg/kafka/log.go
#	ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql
#	frontend/app/components/Session_/QueueControls/QueueControls.tsx
#	frontend/app/components/Session_/Subheader.js
#	frontend/app/components/shared/AutoplayToggle/AutoplayToggle.tsx
#	frontend/app/components/shared/DevTools/ConsolePanel/ConsolePanel.tsx
#	frontend/app/components/shared/DevTools/NetworkPanel/NetworkPanel.tsx
#	frontend/app/components/shared/DevTools/StackEventPanel/StackEventPanel.tsx

Commit c384f854f8: 476 changed files with 21215 additions and 12956 deletions
2	.github/workflows/api-ee.yaml (vendored)
@@ -67,7 +67,7 @@ jobs:
          } && {
            echo "Skipping Security Checks"
          }
          PUSH_IMAGE=1 bash -x ./build.sh ee
          docker push $DOCKER_REPO/$image:$IMAGE_TAG
      - name: Creating old image input
        run: |
          #
2	.github/workflows/api.yaml (vendored)
@@ -66,7 +66,7 @@ jobs:
          } && {
            echo "Skipping Security Checks"
          }
          PUSH_IMAGE=1 bash -x ./build.sh
          docker push $DOCKER_REPO/$image:$IMAGE_TAG
      - name: Creating old image input
        run: |
          #
81	.github/workflows/frontend-dev.yaml (vendored, new file)
@@ -0,0 +1,81 @@
name: Frontend Dev Deployment
on: workflow_dispatch

# Disable previous workflows for this action.
concurrency:
  group: ${{ github.workflow }} #-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Cache node modules
        uses: actions/cache@v1
        with:
          path: node_modules
          key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.OS }}-build-
            ${{ runner.OS }}-

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.DEV_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing frontend image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          set -x
          cd frontend
          mv .env.sample .env
          docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
          # https://github.com/docker/cli/issues/1134#issuecomment-613516912
          DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
          docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
          docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
          docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee

      - name: Deploy to kubernetes foss
        run: |
          cd scripts/helmcharts/

          set -x
          cat <<EOF>>/tmp/image_override.yaml
          frontend:
            image:
              tag: ${IMAGE_TAG}
          EOF

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.DEV_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.DEV_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.DEV_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.DEV_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.DEV_DOMAIN_NAME }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
2	.github/workflows/frontend.yaml (vendored)
@@ -1,4 +1,4 @@
name: Frontend FOSS Deployment
name: Frontend Foss Deployment
on:
  workflow_dispatch:
  push:
30	.github/workflows/workers-ee.yaml (vendored)
@@ -71,12 +71,10 @@ jobs:
          case ${build_param} in
            false)
              {
                git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3

                git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
                  grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
                done
              } | uniq > /tmp/images_to_build.txt
              } | awk '!seen[$0]++' > /tmp/images_to_build.txt
              ;;
            all)
              ls backend/cmd > /tmp/images_to_build.txt
@@ -95,6 +93,7 @@ jobs:
          # Pushing image to registry
          #
          cd backend
          cat /tmp/images_to_build.txt
          for image in $(cat /tmp/images_to_build.txt);
          do
            echo "Building $image"
@@ -109,7 +108,7 @@ jobs:
            } && {
              echo "Skipping Security Checks"
            }
            PUSH_IMAGE=1 bash -x ./build.sh ee $image
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
            echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
          done

@@ -156,22 +155,19 @@ jobs:
          mv /tmp/helmcharts/* openreplay/charts/
          ls openreplay/charts

          cat /tmp/image_override.yaml

          # Deploy command
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -

      #- name: Alert slack
      #  if: ${{ failure() }}
      #  uses: rtCamp/action-slack-notify@v2
      #  env:
      #    SLACK_CHANNEL: ee
      #    SLACK_TITLE: "Failed ${{ github.workflow }}"
      #    SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
      #    SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
      #    SLACK_USERNAME: "OR Bot"
      #    SLACK_MESSAGE: 'Build failed :bomb:'

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: ee
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: 'Build failed :bomb:'

      # - name: Debug Job
      #   if: ${{ failure() }}
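The main behavioral change in both workers workflows is the image-list dedup step, where `} | uniq > /tmp/images_to_build.txt` becomes `} | awk '!seen[$0]++' > /tmp/images_to_build.txt`. A minimal illustration of the difference follows (not part of the diff; the service names are just sample values taken from backend/cmd):

```sh
# uniq only collapses *adjacent* duplicates, so a service emitted by both
# git-diff pipelines above could still appear twice in images_to_build.txt.
printf 'http\nsink\nhttp\n' | uniq               # -> http, sink, http
# awk '!seen[$0]++' keeps only the first occurrence of every line, wherever it appears,
# while preserving the original order.
printf 'http\nsink\nhttp\n' | awk '!seen[$0]++'  # -> http, sink
```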
9	.github/workflows/workers.yaml (vendored)
@@ -71,12 +71,10 @@ jobs:
          case ${build_param} in
            false)
              {
                git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3

                git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
                  grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
                done
              } | uniq > /tmp/images_to_build.txt
              } | awk '!seen[$0]++' > /tmp/images_to_build.txt
              ;;
            all)
              ls backend/cmd > /tmp/images_to_build.txt
@@ -95,6 +93,7 @@ jobs:
          # Pushing image to registry
          #
          cd backend
          cat /tmp/images_to_build.txt
          for image in $(cat /tmp/images_to_build.txt);
          do
            echo "Building $image"
@@ -109,7 +108,7 @@ jobs:
            } && {
              echo "Skipping Security Checks"
            }
            PUSH_IMAGE=1 bash -x ./build.sh skip $image
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
            echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
          done

@@ -154,8 +153,6 @@ jobs:
          mv /tmp/helmcharts/* openreplay/charts/
          ls openreplay/charts

          cat /tmp/image_override.yaml

          # Deploy command
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
3	.gitignore (vendored)
@@ -3,4 +3,5 @@ public
node_modules
*DS_Store
*.env
.idea
**/*.envrc
.idea
@@ -1,6 +1,9 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA

RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
@@ -9,7 +12,8 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
    APP_NAME=chalice \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=false \
    ENTERPRISE_BUILD=${envarg}
    ENTERPRISE_BUILD=${envarg} \
    GIT_SHA=$GIT_SHA

WORKDIR /work
COPY requirements.txt ./requirements.txt
@@ -1,6 +1,9 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA

RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=alerts \
@@ -8,6 +11,7 @@ ENV APP_NAME=alerts \
    PG_MAXCONN=10 \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=true \
    GIT_SHA=$GIT_SHA \
    ENTERPRISE_BUILD=${envarg}

WORKDIR /work
@@ -16,7 +16,8 @@ exit_err() {
}

environment=$1
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
check_prereq() {
    which docker || {
@@ -41,12 +42,12 @@ function build_api(){
        tag="ee-"
    }
    mv Dockerfile.dockerignore .dockerignore
    docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/chalice:${git_sha1} .
    docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/chalice:${image_tag} .
    cd ../api
    rm -rf ../${destination}
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
        docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/chalice:${image_tag}
        docker tag ${DOCKER_REPO:-'local'}/chalice:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
    }
    echo "api docker build completed"
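For context, the companion scripts carry the usage comment "IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>"; a typical invocation of this api build script would then look roughly like the following sketch (the tag and repository are placeholder values, and the script is assumed to be run from the directory containing the Dockerfile):

```sh
# Hypothetical invocation based on the "Usage:" comment quoted in the diff;
# IMAGE_TAG and DOCKER_REPO are placeholders, PUSH_IMAGE=1 also pushes the images.
IMAGE_TAG=v1.9.5 DOCKER_REPO=myDockerHubID PUSH_IMAGE=1 bash build.sh ee
```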
@@ -7,7 +7,8 @@

# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>

git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
check_prereq() {
    which docker || {
@@ -31,12 +32,12 @@ function build_alerts(){
        tag="ee-"
    }
    mv Dockerfile_alerts.dockerignore .dockerignore
    docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
    docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
    cd ../api
    rm -rf ../${destination}
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
        docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/alerts:${image_tag}
        docker tag ${DOCKER_REPO:-'local'}/alerts:${image_tag} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
    }
    echo "completed alerts build"
@@ -19,10 +19,14 @@ RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags dynamic open


FROM alpine AS entrypoint
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA

RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
RUN adduser -u 1001 openreplay -D

ENV TZ=UTC \
    GIT_SHA=$GIT_SHA \
    FS_ULIMIT=1000 \
    FS_DIR=/mnt/efs \
    MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
@@ -69,9 +73,10 @@ ENV TZ=UTC \
    PARTITIONS_NUMBER=16 \
    QUEUE_MESSAGE_SIZE_LIMIT=1048576 \
    BEACON_SIZE_LIMIT=1000000 \
    USE_FAILOVER=true \
    USE_FAILOVER=false \
    GROUP_STORAGE_FAILOVER=failover \
    TOPIC_STORAGE_FAILOVER=storage-failover
    TOPIC_STORAGE_FAILOVER=storage-failover \
    PROFILER_ENABLED=false
@@ -9,7 +9,8 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
set -e

git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
ee="false"
check_prereq() {
    which docker || {
@@ -22,9 +23,9 @@ check_prereq() {
function build_service() {
    image="$1"
    echo "BUILDING $image"
    docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
    docker build -t ${DOCKER_REPO:-'local'}/$image:${image_tag} --platform linux/amd64 --build-arg SERVICE_NAME=$image --build-arg GIT_SHA=$git_sha .
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
        docker push ${DOCKER_REPO:-'local'}/$image:${image_tag}
    }
    echo "Build completed for $image"
    return
@@ -51,7 +52,7 @@ function build_api(){
    for image in $(ls cmd);
    do
        build_service $image
        echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
        echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${image_tag}"
    done
    cd ../backend
    rm -rf ../${destination}
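The workers workflows above invoke this backend script once per service name read from /tmp/images_to_build.txt (`PUSH_IMAGE=1 bash -x ./build.sh ee $image`). A sketch of an equivalent local run follows; "http" is one of the directories under backend/cmd shown in this diff, and the registry and tag values are placeholders:

```sh
# Sketch mirroring the workers-ee workflow step; values are illustrative only.
cd backend
IMAGE_TAG=local-test DOCKER_REPO=local bash build.sh ee http   # build a single service image
# Add PUSH_IMAGE=1 to also push the resulting image, as the workflow does.
```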
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"log"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
|
@ -18,10 +19,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("assets")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
cacher := cacher.NewCacher(cfg, metrics)
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import (
|
|||
"errors"
|
||||
"log"
|
||||
types2 "openreplay/backend/pkg/db/types"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
|
@ -16,7 +17,6 @@ import (
|
|||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/handlers"
|
||||
custom2 "openreplay/backend/pkg/handlers/custom"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/queue"
|
||||
|
|
@ -25,10 +25,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("db")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := db.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
// Init database
|
||||
pg := cache.NewPGCache(
|
||||
|
|
@ -56,20 +58,17 @@ func main() {
|
|||
// Init modules
|
||||
saver := datasaver.New(pg, producer)
|
||||
saver.InitStats()
|
||||
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
|
||||
|
||||
msgFilter := []int{messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
|
||||
messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgClickEvent,
|
||||
messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
|
||||
messages.MsgJSException, messages.MsgResourceTiming,
|
||||
messages.MsgRawCustomEvent, messages.MsgCustomIssue, messages.MsgFetch, messages.MsgGraphQL,
|
||||
messages.MsgCustomEvent, messages.MsgCustomIssue, messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL,
|
||||
messages.MsgStateAction, messages.MsgSetInputTarget, messages.MsgSetInputValue, messages.MsgCreateDocument,
|
||||
messages.MsgMouseClick, messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming}
|
||||
|
||||
// Handler logic
|
||||
msgHandler := func(msg messages.Message) {
|
||||
statsLogger.Collect(msg)
|
||||
|
||||
// Just save session data into db without additional checks
|
||||
if err := saver.InsertMessage(msg); err != nil {
|
||||
if !postgres.IsPkeyViolation(err) {
|
||||
|
|
|
|||
|
|
@ -3,8 +3,10 @@ package main
|
|||
import (
|
||||
"log"
|
||||
"openreplay/backend/internal/storage"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
|
|
@ -13,21 +15,24 @@ import (
|
|||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/intervals"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
metrics := monitoring.New("ender")
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := ender.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
|
||||
defer pg.Close()
|
||||
|
||||
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber, logger.NewQueueStats(cfg.LoggerTimeout))
|
||||
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
|
||||
if err != nil {
|
||||
log.Printf("can't init ender service: %s", err)
|
||||
return
|
||||
|
|
@ -62,6 +67,9 @@ func main() {
|
|||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-tick:
|
||||
failedSessionEnds := make(map[uint64]int64)
|
||||
duplicatedSessionEnds := make(map[uint64]uint64)
|
||||
|
||||
// Find ended sessions and send notification to other services
|
||||
sessions.HandleEndedSessions(func(sessionID uint64, timestamp int64) bool {
|
||||
msg := &messages.SessionEnd{Timestamp: uint64(timestamp)}
|
||||
|
|
@ -71,12 +79,17 @@ func main() {
|
|||
}
|
||||
newDuration, err := pg.InsertSessionEnd(sessionID, msg.Timestamp)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "integer out of range") {
|
||||
// Skip session with broken duration
|
||||
failedSessionEnds[sessionID] = timestamp
|
||||
return true
|
||||
}
|
||||
log.Printf("can't save sessionEnd to database, sessID: %d, err: %s", sessionID, err)
|
||||
return false
|
||||
}
|
||||
if currDuration == newDuration {
|
||||
log.Printf("sessionEnd duplicate, sessID: %d, prevDur: %d, newDur: %d", sessionID,
|
||||
currDuration, newDuration)
|
||||
// Skip session end duplicate
|
||||
duplicatedSessionEnds[sessionID] = currDuration
|
||||
return true
|
||||
}
|
||||
if cfg.UseEncryption {
|
||||
|
|
@ -94,6 +107,12 @@ func main() {
|
|||
}
|
||||
return true
|
||||
})
|
||||
if len(failedSessionEnds) > 0 {
|
||||
log.Println("sessions with wrong duration:", failedSessionEnds)
|
||||
}
|
||||
if len(duplicatedSessionEnds) > 0 {
|
||||
log.Println("session end duplicates:", duplicatedSessionEnds)
|
||||
}
|
||||
producer.Flush(cfg.ProducerTimeout)
|
||||
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
|
||||
log.Printf("can't commit messages with offset: %s", err)
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
|
@ -11,7 +12,6 @@ import (
|
|||
"openreplay/backend/pkg/handlers"
|
||||
web2 "openreplay/backend/pkg/handlers/web"
|
||||
"openreplay/backend/pkg/intervals"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/sessions"
|
||||
|
|
@ -20,8 +20,10 @@ import (
|
|||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
// Load service configuration
|
||||
cfg := heuristics.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
|
||||
handlersFabric := func() []handlers.MessageProcessor {
|
||||
|
|
@ -41,14 +43,10 @@ func main() {
|
|||
// Create handler's aggregator
|
||||
builderMap := sessions.NewBuilderMap(handlersFabric)
|
||||
|
||||
// Init logger
|
||||
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
|
||||
|
||||
// Init producer and consumer for data bus
|
||||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
|
||||
msgHandler := func(msg messages.Message) {
|
||||
statsLogger.Collect(msg)
|
||||
builderMap.HandleMessage(msg)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"openreplay/backend/internal/http/server"
|
||||
"openreplay/backend/internal/http/services"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
|
@ -18,10 +19,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("http")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := http.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
// Connect to queue
|
||||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import (
|
|||
config "openreplay/backend/internal/config/integrations"
|
||||
"openreplay/backend/internal/integrations/clientManager"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
|
@ -19,10 +20,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("integrations")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
pg := postgres.NewConn(cfg.PostgresURI, 0, 0, metrics)
|
||||
defer pg.Close()
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"log"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
|
@ -20,10 +21,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("sink")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := sink.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
|
||||
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
|
@ -18,10 +19,12 @@ import (
|
|||
|
||||
func main() {
|
||||
metrics := monitoring.New("storage")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
if cfg.UseProfiler {
|
||||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
|
||||
srv, err := storage.New(cfg, s3, metrics)
|
||||
|
|
@ -44,8 +47,8 @@ func main() {
|
|||
messages.NewMessageIterator(
|
||||
func(msg messages.Message) {
|
||||
sesEnd := msg.(*messages.SessionEnd)
|
||||
if err := srv.UploadSessionFiles(sesEnd); err != nil {
|
||||
log.Printf("can't find session: %d", msg.SessionID())
|
||||
if err := srv.Upload(sesEnd); err != nil {
|
||||
log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
|
||||
sessionFinder.Find(msg.SessionID(), sesEnd.Timestamp)
|
||||
}
|
||||
// Log timestamp of last processed session
|
||||
|
|
@ -54,7 +57,7 @@ func main() {
|
|||
[]int{messages.MsgSessionEnd},
|
||||
true,
|
||||
),
|
||||
true,
|
||||
false,
|
||||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
||||
|
|
@ -69,10 +72,15 @@ func main() {
|
|||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
sessionFinder.Stop()
|
||||
srv.Wait()
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-counterTick:
|
||||
go counter.Print()
|
||||
srv.Wait()
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("can't commit messages: %s", err)
|
||||
}
|
||||
case msg := <-consumer.Rebalanced():
|
||||
log.Println(msg)
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ type Config struct {
|
|||
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
|
||||
AssetsSizeLimit int `env:"ASSETS_SIZE_LIMIT,required"`
|
||||
AssetsRequestHeaders map[string]string `env:"ASSETS_REQUEST_HEADERS"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ type Config struct {
|
|||
BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"`
|
||||
BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"`
|
||||
UseQuickwit bool `env:"QUICKWIT_ENABLED,default=false"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ type Config struct {
|
|||
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
|
||||
PartitionsNumber int `env:"PARTITIONS_NUMBER,required"`
|
||||
UseEncryption bool `env:"USE_ENCRYPTION,default=false"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ type Config struct {
|
|||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ type Config struct {
|
|||
TokenSecret string `env:"TOKEN_SECRET,required"`
|
||||
UAParserFile string `env:"UAPARSER_FILE,required"`
|
||||
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
WorkerID uint16
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ type Config struct {
|
|||
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
|
||||
PostgresURI string `env:"POSTGRES_STRING,required"`
|
||||
TokenSecret string `env:"TOKEN_SECRET,required"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@ type Config struct {
|
|||
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
|
||||
CacheThreshold int64 `env:"CACHE_THRESHOLD,default=5"`
|
||||
CacheExpiration int64 `env:"CACHE_EXPIRATION,default=120"`
|
||||
CacheBlackList string `env:"CACHE_BLACK_LIST,default="`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ type Config struct {
|
|||
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
|
||||
UseFailover bool `env:"USE_FAILOVER,default=false"`
|
||||
MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type FetchEventFTS struct {
|
||||
type NetworkRequestFTS struct {
|
||||
Method string `json:"method"`
|
||||
URL string `json:"url"`
|
||||
Request string `json:"request"`
|
||||
|
|
@ -36,7 +36,7 @@ type PageEventFTS struct {
|
|||
TimeToInteractive uint64 `json:"time_to_interactive"`
|
||||
}
|
||||
|
||||
type GraphQLEventFTS struct {
|
||||
type GraphQLFTS struct {
|
||||
OperationKind string `json:"operation_kind"`
|
||||
OperationName string `json:"operation_name"`
|
||||
Variables string `json:"variables"`
|
||||
|
|
@ -56,18 +56,8 @@ func (s *Saver) sendToFTS(msg messages.Message, sessionID uint64) {
|
|||
|
||||
switch m := msg.(type) {
|
||||
// Common
|
||||
case *messages.Fetch:
|
||||
event, err = json.Marshal(FetchEventFTS{
|
||||
Method: m.Method,
|
||||
URL: m.URL,
|
||||
Request: m.Request,
|
||||
Response: m.Response,
|
||||
Status: m.Status,
|
||||
Timestamp: m.Timestamp,
|
||||
Duration: m.Duration,
|
||||
})
|
||||
case *messages.FetchEvent:
|
||||
event, err = json.Marshal(FetchEventFTS{
|
||||
case *messages.NetworkRequest:
|
||||
event, err = json.Marshal(NetworkRequestFTS{
|
||||
Method: m.Method,
|
||||
URL: m.URL,
|
||||
Request: m.Request,
|
||||
|
|
@ -97,14 +87,7 @@ func (s *Saver) sendToFTS(msg messages.Message, sessionID uint64) {
|
|||
TimeToInteractive: m.TimeToInteractive,
|
||||
})
|
||||
case *messages.GraphQL:
|
||||
event, err = json.Marshal(GraphQLEventFTS{
|
||||
OperationKind: m.OperationKind,
|
||||
OperationName: m.OperationName,
|
||||
Variables: m.Variables,
|
||||
Response: m.Response,
|
||||
})
|
||||
case *messages.GraphQLEvent:
|
||||
event, err = json.Marshal(GraphQLEventFTS{
|
||||
event, err = json.Marshal(GraphQLFTS{
|
||||
OperationKind: m.OperationKind,
|
||||
OperationName: m.OperationName,
|
||||
Variables: m.Variables,
|
||||
|
|
|
|||
|
|
@ -38,12 +38,12 @@ func (mi *Saver) InsertMessage(msg Message) error {
|
|||
case *PageEvent:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebPageEvent(sessionID, m)
|
||||
case *FetchEvent:
|
||||
case *NetworkRequest:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebFetchEvent(sessionID, m)
|
||||
case *GraphQLEvent:
|
||||
return mi.pg.InsertWebNetworkRequest(sessionID, m)
|
||||
case *GraphQL:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
|
||||
return mi.pg.InsertWebGraphQL(sessionID, m)
|
||||
case *JSException:
|
||||
return mi.pg.InsertWebJSException(m)
|
||||
case *IntegrationEvent:
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ import (
|
|||
"fmt"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
log2 "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"time"
|
||||
|
|
@ -29,10 +28,9 @@ type SessionEnder struct {
|
|||
timeCtrl *timeController
|
||||
activeSessions syncfloat64.UpDownCounter
|
||||
totalSessions syncfloat64.Counter
|
||||
stats log2.QueueStats
|
||||
}
|
||||
|
||||
func New(metrics *monitoring.Metrics, timeout int64, parts int, stats log2.QueueStats) (*SessionEnder, error) {
|
||||
func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) {
|
||||
if metrics == nil {
|
||||
return nil, fmt.Errorf("metrics module is empty")
|
||||
}
|
||||
|
|
@ -51,13 +49,11 @@ func New(metrics *monitoring.Metrics, timeout int64, parts int, stats log2.Queue
|
|||
timeCtrl: NewTimeController(parts),
|
||||
activeSessions: activeSessions,
|
||||
totalSessions: totalSessions,
|
||||
stats: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateSession save timestamp for new sessions and update for existing sessions
|
||||
func (se *SessionEnder) UpdateSession(msg messages.Message) {
|
||||
se.stats.Collect(msg)
|
||||
var (
|
||||
sessionID = msg.Meta().SessionID()
|
||||
batchTimestamp = msg.Meta().Batch().Timestamp()
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
|
@ -27,6 +28,7 @@ type AssetsCache struct {
|
|||
rewriter *assets.Rewriter
|
||||
producer types.Producer
|
||||
cache map[string]*CachedAsset
|
||||
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
|
||||
totalAssets syncfloat64.Counter
|
||||
cachedAssets syncfloat64.Counter
|
||||
skippedAssets syncfloat64.Counter
|
||||
|
|
@ -61,12 +63,22 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, m
|
|||
rewriter: rewriter,
|
||||
producer: producer,
|
||||
cache: make(map[string]*CachedAsset, 64),
|
||||
blackList: make([]string, 0),
|
||||
totalAssets: totalAssets,
|
||||
cachedAssets: cachedAssets,
|
||||
skippedAssets: skippedAssets,
|
||||
assetSize: assetSize,
|
||||
assetDuration: assetDuration,
|
||||
}
|
||||
// Parse black list for cache layer
|
||||
if len(cfg.CacheBlackList) > 0 {
|
||||
blackList := strings.Split(cfg.CacheBlackList, ",")
|
||||
for _, domain := range blackList {
|
||||
if len(domain) > 0 {
|
||||
assetsCache.blackList = append(assetsCache.blackList, domain)
|
||||
}
|
||||
}
|
||||
}
|
||||
go assetsCache.cleaner()
|
||||
return assetsCache
|
||||
}
|
||||
|
|
@ -98,6 +110,22 @@ func (e *AssetsCache) clearCache() {
|
|||
log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
|
||||
}
|
||||
|
||||
func (e *AssetsCache) shouldSkipAsset(baseURL string) bool {
|
||||
if len(e.blackList) == 0 {
|
||||
return false
|
||||
}
|
||||
host, err := parseHost(baseURL)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, blackHost := range e.blackList {
|
||||
if strings.Contains(host, blackHost) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
||||
switch m := msg.(type) {
|
||||
case *messages.SetNodeAttributeURLBased:
|
||||
|
|
@ -110,6 +138,9 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
|||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
} else if m.Name == "style" {
|
||||
if e.shouldSkipAsset(m.BaseURL) {
|
||||
return msg
|
||||
}
|
||||
newMsg := &messages.SetNodeAttribute{
|
||||
ID: m.ID,
|
||||
Name: m.Name,
|
||||
|
|
@ -119,6 +150,9 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
|||
return newMsg
|
||||
}
|
||||
case *messages.SetCSSDataURLBased:
|
||||
if e.shouldSkipAsset(m.BaseURL) {
|
||||
return msg
|
||||
}
|
||||
newMsg := &messages.SetCSSData{
|
||||
ID: m.ID,
|
||||
Data: e.handleCSS(m.SessionID(), m.BaseURL, m.Data),
|
||||
|
|
@ -126,6 +160,9 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
|||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
case *messages.CSSInsertRuleURLBased:
|
||||
if e.shouldSkipAsset(m.BaseURL) {
|
||||
return msg
|
||||
}
|
||||
newMsg := &messages.CSSInsertRule{
|
||||
ID: m.ID,
|
||||
Index: m.Index,
|
||||
|
|
@ -134,6 +171,9 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
|||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
case *messages.AdoptedSSReplaceURLBased:
|
||||
if e.shouldSkipAsset(m.BaseURL) {
|
||||
return msg
|
||||
}
|
||||
newMsg := &messages.AdoptedSSReplace{
|
||||
SheetID: m.SheetID,
|
||||
Text: e.handleCSS(m.SessionID(), m.BaseURL, m.Text),
|
||||
|
|
@ -141,6 +181,9 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
|||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
case *messages.AdoptedSSInsertRuleURLBased:
|
||||
if e.shouldSkipAsset(m.BaseURL) {
|
||||
return msg
|
||||
}
|
||||
newMsg := &messages.AdoptedSSInsertRule{
|
||||
SheetID: m.SheetID,
|
||||
Index: m.Index,
|
||||
|
|
@ -180,13 +223,21 @@ func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, urlVal string)
|
|||
}
|
||||
}
|
||||
|
||||
func parseHost(baseURL string) (string, error) {
|
||||
u, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return u.Scheme + "://" + u.Host + "/", nil
|
||||
}
|
||||
|
||||
func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
|
||||
ctx := context.Background()
|
||||
e.totalAssets.Add(ctx, 1)
|
||||
// Try to find asset in cache
|
||||
h := md5.New()
|
||||
// Cut first part of url (scheme + host)
|
||||
u, err := url.Parse(baseURL)
|
||||
justUrl, err := parseHost(baseURL)
|
||||
if err != nil {
|
||||
log.Printf("can't parse url: %s, err: %s", baseURL, err)
|
||||
if e.cfg.CacheAssets {
|
||||
|
|
@ -194,7 +245,6 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
|
|||
}
|
||||
return e.getRewrittenCSS(sessionID, baseURL, css)
|
||||
}
|
||||
justUrl := u.Scheme + "://" + u.Host + "/"
|
||||
// Calculate hash sum of url + css
|
||||
io.WriteString(h, justUrl)
|
||||
io.WriteString(h, css)
|
||||
|
|
|
|||
|
|
@ -4,29 +4,51 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type FileType string
|
||||
|
||||
const (
|
||||
DOM FileType = "/dom.mob"
|
||||
DEV FileType = "/devtools.mob"
|
||||
)
|
||||
|
||||
type Task struct {
|
||||
id string
|
||||
doms *bytes.Buffer
|
||||
dome *bytes.Buffer
|
||||
dev *bytes.Buffer
|
||||
}
|
||||
|
||||
type Storage struct {
|
||||
cfg *config.Config
|
||||
s3 *storage.S3
|
||||
startBytes []byte
|
||||
|
||||
totalSessions syncfloat64.Counter
|
||||
sessionDOMSize syncfloat64.Histogram
|
||||
sessionDevtoolsSize syncfloat64.Histogram
|
||||
readingDOMTime syncfloat64.Histogram
|
||||
readingTime syncfloat64.Histogram
|
||||
archivingTime syncfloat64.Histogram
|
||||
totalSessions syncfloat64.Counter
|
||||
sessionDOMSize syncfloat64.Histogram
|
||||
sessionDEVSize syncfloat64.Histogram
|
||||
readingDOMTime syncfloat64.Histogram
|
||||
readingDEVTime syncfloat64.Histogram
|
||||
archivingDOMTime syncfloat64.Histogram
|
||||
archivingDEVTime syncfloat64.Histogram
|
||||
uploadingDOMTime syncfloat64.Histogram
|
||||
uploadingDEVTime syncfloat64.Histogram
|
||||
|
||||
tasks chan *Task
|
||||
ready chan struct{}
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
|
||||
|
|
@ -49,186 +71,235 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
|
|||
if err != nil {
|
||||
log.Printf("can't create sessions_dt_size metric: %s", err)
|
||||
}
|
||||
readingTime, err := metrics.RegisterHistogram("reading_duration")
|
||||
readingDOMTime, err := metrics.RegisterHistogram("reading_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
archivingTime, err := metrics.RegisterHistogram("archiving_duration")
|
||||
readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create archiving_duration metric: %s", err)
|
||||
}
|
||||
return &Storage{
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
totalSessions: totalSessions,
|
||||
sessionDOMSize: sessionDOMSize,
|
||||
sessionDevtoolsSize: sessionDevtoolsSize,
|
||||
readingTime: readingTime,
|
||||
archivingTime: archivingTime,
|
||||
}, nil
|
||||
archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create archiving_duration metric: %s", err)
|
||||
}
|
||||
uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create uploading_duration metric: %s", err)
|
||||
}
|
||||
uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create uploading_duration metric: %s", err)
|
||||
}
|
||||
newStorage := &Storage{
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
totalSessions: totalSessions,
|
||||
sessionDOMSize: sessionDOMSize,
|
||||
sessionDEVSize: sessionDevtoolsSize,
|
||||
readingDOMTime: readingDOMTime,
|
||||
readingDEVTime: readingDEVTime,
|
||||
archivingDOMTime: archivingDOMTime,
|
||||
archivingDEVTime: archivingDEVTime,
|
||||
uploadingDOMTime: uploadingDOMTime,
|
||||
uploadingDEVTime: uploadingDEVTime,
|
||||
tasks: make(chan *Task, 1),
|
||||
ready: make(chan struct{}),
|
||||
}
|
||||
go newStorage.worker()
|
||||
return newStorage, nil
|
||||
}
|
||||
|
||||
func (s *Storage) UploadSessionFiles(msg *messages.SessionEnd) error {
|
||||
if err := s.uploadKey(msg.SessionID(), "/dom.mob", true, 5, msg.EncryptionKey); err != nil {
|
||||
func (s *Storage) Wait() {
|
||||
<-s.ready
|
||||
}
|
||||
|
||||
func (s *Storage) Upload(msg *messages.SessionEnd) (err error) {
|
||||
// Generate file path
|
||||
sessionID := strconv.FormatUint(msg.SessionID(), 10)
|
||||
filePath := s.cfg.FSDir + "/" + sessionID
|
||||
// Prepare sessions
|
||||
newTask := &Task{
|
||||
id: sessionID,
|
||||
}
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
if prepErr := s.prepareSession(filePath, DOM, newTask); prepErr != nil {
|
||||
err = fmt.Errorf("prepareSession DOM err: %s", prepErr)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
if prepErr := s.prepareSession(filePath, DEV, newTask); prepErr != nil {
|
||||
err = fmt.Errorf("prepareSession DEV err: %s", prepErr)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "big file") {
|
||||
log.Printf("%s, sess: %d", err, msg.SessionID())
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := s.uploadKey(msg.SessionID(), "/devtools.mob", false, 4, msg.EncryptionKey); err != nil {
|
||||
log.Printf("can't find devtools for session: %d, err: %s", msg.SessionID(), err)
|
||||
}
|
||||
// Send new task to worker
|
||||
s.tasks <- newTask
|
||||
// Unload worker
|
||||
<-s.ready
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: make a bit cleaner.
|
||||
// TODO: Of course, I'll do!
|
||||
func (s *Storage) uploadKey(sessID uint64, suffix string, shouldSplit bool, retryCount int, encryptionKey string) error {
|
||||
if retryCount <= 0 {
|
||||
return nil
|
||||
}
|
||||
start := time.Now()
|
||||
fileName := strconv.FormatUint(sessID, 10)
|
||||
mobFileName := fileName
|
||||
if suffix == "/devtools.mob" {
|
||||
mobFileName += "devtools"
|
||||
}
|
||||
filePath := s.cfg.FSDir + "/" + mobFileName
|
||||
|
||||
func (s *Storage) openSession(filePath string) ([]byte, error) {
|
||||
// Check file size before download into memory
|
||||
info, err := os.Stat(filePath)
|
||||
if err == nil {
|
||||
if info.Size() > s.cfg.MaxFileSize {
|
||||
log.Printf("big file, size: %d, session: %d", info.Size(), sessID)
|
||||
return nil
|
||||
}
|
||||
if err == nil && info.Size() > s.cfg.MaxFileSize {
|
||||
return nil, fmt.Errorf("big file, size: %d", info.Size())
|
||||
}
|
||||
file, err := os.Open(filePath)
|
||||
// Read file into memory
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
|
||||
func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
|
||||
// Open mob file
|
||||
if tp == DEV {
|
||||
path += "devtools"
|
||||
}
|
||||
startRead := time.Now()
|
||||
mob, err := s.openSession(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("File open error: %v; sessID: %s, part: %d, sessStart: %s\n",
|
||||
err, fileName, sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var fileSize int64 = 0
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
log.Printf("can't get file info: %s", err)
|
||||
} else {
|
||||
fileSize = fileInfo.Size()
|
||||
}
|
||||
|
||||
var encryptedData []byte
|
||||
fileName += suffix
|
||||
if shouldSplit {
|
||||
nRead, err := file.Read(s.startBytes)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
time.AfterFunc(s.cfg.RetryTimeout, func() {
|
||||
s.uploadKey(sessID, suffix, shouldSplit, retryCount-1, encryptionKey)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
s.readingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
|
||||
start = time.Now()
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(s.startBytes[:nRead], []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = s.startBytes[:nRead]
|
||||
}
|
||||
} else {
|
||||
encryptedData = s.startBytes[:nRead]
|
||||
}
|
||||
// Compress and save to s3
|
||||
startReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(startReader), fileName+"s", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %v\n", err)
|
||||
}
|
||||
// TODO: fix possible error (if we read less then FileSplitSize)
|
||||
if nRead == s.cfg.FileSplitSize {
|
||||
restPartSize := fileSize - int64(nRead)
|
||||
fileData := make([]byte, restPartSize)
|
||||
nRead, err = file.Read(fileData)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
if int64(nRead) != restPartSize {
|
||||
log.Printf("can't read the rest part of file")
|
||||
}
|
||||
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = fileData
|
||||
}
|
||||
} else {
|
||||
encryptedData = fileData
|
||||
}
|
||||
// Compress and save to s3
|
||||
endReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(endReader), fileName+"e", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
}
|
||||
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
} else {
|
||||
start = time.Now()
|
||||
fileData := make([]byte, fileSize)
|
||||
nRead, err := file.Read(fileData)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
if int64(nRead) != fileSize {
|
||||
log.Printf("can't read the rest part of file")
|
||||
}
|
||||
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = fileData
|
||||
}
|
||||
} else {
|
||||
encryptedData = fileData
|
||||
}
|
||||
endReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(endReader), fileName, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
}
|
||||
|
||||
// Save metrics
|
||||
durRead := time.Now().Sub(startRead).Milliseconds()
|
||||
// Send metrics
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
|
||||
if shouldSplit {
|
||||
s.totalSessions.Add(ctx, 1)
|
||||
s.sessionDOMSize.Record(ctx, float64(fileSize))
|
||||
if tp == DOM {
|
||||
s.sessionDOMSize.Record(ctx, float64(len(mob)))
|
||||
s.readingDOMTime.Record(ctx, float64(durRead))
|
||||
} else {
|
||||
s.sessionDevtoolsSize.Record(ctx, float64(fileSize))
|
||||
s.sessionDEVSize.Record(ctx, float64(len(mob)))
|
||||
s.readingDEVTime.Record(ctx, float64(durRead))
|
||||
}
|
||||
// Encode and compress session
|
||||
if tp == DEV {
|
||||
startCompress := time.Now()
|
||||
task.dev = s.compressSession(mob)
|
||||
s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
|
||||
} else {
|
||||
if len(mob) <= s.cfg.FileSplitSize {
|
||||
startCompress := time.Now()
|
||||
task.doms = s.compressSession(mob)
|
||||
s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
var firstPart, secondPart int64
|
||||
go func() {
|
||||
start := time.Now()
|
||||
task.doms = s.compressSession(mob[:s.cfg.FileSplitSize])
|
||||
firstPart = time.Now().Sub(start).Milliseconds()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
start := time.Now()
|
||||
task.dome = s.compressSession(mob[s.cfg.FileSplitSize:])
|
||||
secondPart = time.Now().Sub(start).Milliseconds()
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) encryptSession(data []byte, encryptionKey string) []byte {
|
||||
var encryptedData []byte
|
||||
var err error
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(data, []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = data
|
||||
}
|
||||
} else {
|
||||
encryptedData = data
|
||||
}
|
||||
return encryptedData
|
||||
}
|
||||
|
||||
func (s *Storage) compressSession(data []byte) *bytes.Buffer {
|
||||
zippedMob := new(bytes.Buffer)
|
||||
z, _ := gzip.NewWriterLevel(zippedMob, gzip.BestSpeed)
|
||||
if _, err := z.Write(data); err != nil {
|
||||
log.Printf("can't write session data to compressor: %s", err)
|
||||
}
|
||||
if err := z.Close(); err != nil {
|
||||
log.Printf("can't close compressor: %s", err)
|
||||
}
|
||||
return zippedMob
|
||||
}
|
||||
|
||||
func (s *Storage) uploadSession(task *Task) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(3)
|
||||
var (
|
||||
uploadDoms int64 = 0
|
||||
uploadDome int64 = 0
|
||||
uploadDev int64 = 0
|
||||
)
|
||||
go func() {
|
||||
if task.doms != nil {
|
||||
start := time.Now()
|
||||
if err := s.s3.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %s", err)
|
||||
}
|
||||
uploadDoms = time.Now().Sub(start).Milliseconds()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
if task.dome != nil {
|
||||
start := time.Now()
|
||||
if err := s.s3.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %s", err)
|
||||
}
|
||||
uploadDome = time.Now().Sub(start).Milliseconds()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
if task.dev != nil {
|
||||
start := time.Now()
|
||||
if err := s.s3.Upload(task.dev, task.id+string(DEV), "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %s", err)
|
||||
}
|
||||
uploadDev = time.Now().Sub(start).Milliseconds()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
// Record metrics
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
|
||||
s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome))
|
||||
s.uploadingDEVTime.Record(ctx, float64(uploadDev))
|
||||
s.totalSessions.Add(ctx, 1)
|
||||
}
|
||||
|
||||
func (s *Storage) worker() {
|
||||
for {
|
||||
select {
|
||||
case task := <-s.tasks:
|
||||
s.uploadSession(task)
|
||||
default:
|
||||
// Signal that worker finished all tasks
|
||||
s.ready <- struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
8	backend/pkg/db/cache/messages-web.go (vendored)
@@ -99,7 +99,7 @@ func (c *PGCache) InsertSessionReferrer(sessionID uint64, referrer string) error
    return c.Conn.InsertSessionReferrer(sessionID, referrer)
}

func (c *PGCache) InsertWebFetchEvent(sessionID uint64, e *FetchEvent) error {
func (c *PGCache) InsertWebNetworkRequest(sessionID uint64, e *NetworkRequest) error {
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
@@ -108,10 +108,10 @@ func (c *PGCache) InsertWebFetchEvent(sessionID uint64, e *FetchEvent) error {
    if err != nil {
        return err
    }
    return c.Conn.InsertWebFetchEvent(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
    return c.Conn.InsertWebNetworkRequest(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
}

func (c *PGCache) InsertWebGraphQLEvent(sessionID uint64, e *GraphQLEvent) error {
func (c *PGCache) InsertWebGraphQL(sessionID uint64, e *GraphQL) error {
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
@@ -120,7 +120,7 @@ func (c *PGCache) InsertWebGraphQLEvent(sessionID uint64, e *GraphQLEvent) error
    if err != nil {
        return err
    }
    return c.Conn.InsertWebGraphQLEvent(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
    return c.Conn.InsertWebGraphQL(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
}

func (c *PGCache) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
|
|
@ -2,8 +2,14 @@ package postgres
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -18,13 +24,15 @@ type Bulk interface {
|
|||
}
|
||||
|
||||
type bulkImpl struct {
|
||||
conn Pool
|
||||
table string
|
||||
columns string
|
||||
template string
|
||||
setSize int
|
||||
sizeLimit int
|
||||
values []interface{}
|
||||
conn Pool
|
||||
table string
|
||||
columns string
|
||||
template string
|
||||
setSize int
|
||||
sizeLimit int
|
||||
values []interface{}
|
||||
bulkSize syncfloat64.Histogram
|
||||
bulkDuration syncfloat64.Histogram
|
||||
}
|
||||
|
||||
func (b *bulkImpl) Append(args ...interface{}) error {
|
||||
|
|
@ -46,6 +54,8 @@ func (b *bulkImpl) Send() error {
|
|||
}
|
||||
|
||||
func (b *bulkImpl) send() error {
|
||||
start := time.Now()
|
||||
size := len(b.values) / b.setSize
|
||||
request := bytes.NewBufferString(insertPrefix + b.table + b.columns + insertValues)
|
||||
args := make([]interface{}, b.setSize)
|
||||
for i := 0; i < len(b.values)/b.setSize; i++ {
|
||||
|
|
@ -63,13 +73,19 @@ func (b *bulkImpl) send() error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("send bulk err: %s", err)
|
||||
}
|
||||
// Save bulk metrics
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
|
||||
b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table))
|
||||
b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table))
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
|
||||
func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
|
||||
switch {
|
||||
case conn == nil:
|
||||
return nil, errors.New("db conn is empty")
|
||||
case metrics == nil:
|
||||
return nil, errors.New("metrics is empty")
|
||||
case table == "":
|
||||
return nil, errors.New("table is empty")
|
||||
case columns == "":
|
||||
|
|
@ -81,13 +97,23 @@ func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int)
|
|||
case sizeLimit <= 0:
|
||||
return nil, errors.New("size limit is wrong")
|
||||
}
|
||||
messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk")
|
||||
if err != nil {
|
||||
log.Printf("can't create messages_size metric: %s", err)
|
||||
}
|
||||
bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create messages_size metric: %s", err)
|
||||
}
|
||||
return &bulkImpl{
|
||||
conn: conn,
|
||||
table: table,
|
||||
columns: columns,
|
||||
template: template,
|
||||
setSize: setSize,
|
||||
sizeLimit: sizeLimit,
|
||||
values: make([]interface{}, 0, setSize*sizeLimit),
|
||||
conn: conn,
|
||||
table: table,
|
||||
columns: columns,
|
||||
template: template,
|
||||
setSize: setSize,
|
||||
sizeLimit: sizeLimit,
|
||||
values: make([]interface{}, 0, setSize*sizeLimit),
|
||||
bulkSize: messagesInBulk,
|
||||
bulkDuration: bulkInsertDuration,
|
||||
}, nil
|
||||
}
|
||||
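The bulk writer above expands a numbered-placeholder template such as "($%d, $%d, $%d)" once per buffered row to build a single multi-row INSERT. A self-contained sketch of that expansion follows; the "INSERT INTO " / " VALUES " prefixes stand in for the insertPrefix and insertValues constants that are not shown in this hunk, and the helper name is hypothetical, while table, columns, template and setSize follow the autocomplete bulk above.

package main

import (
	"fmt"
	"strings"
)

// buildBulkSQL expands one placeholder template per row, numbering the
// parameters consecutively, so the whole buffer is flushed in one statement.
func buildBulkSQL(table, columns, template string, setSize, rows int) string {
	var sb strings.Builder
	sb.WriteString("INSERT INTO " + table + " " + columns + " VALUES ")
	idx := 1
	for row := 0; row < rows; row++ {
		nums := make([]interface{}, setSize)
		for i := 0; i < setSize; i++ {
			nums[i] = idx
			idx++
		}
		if row > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(fmt.Sprintf(template, nums...))
	}
	return sb.String()
}

func main() {
	sql := buildBulkSQL("autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", 3, 2)
	fmt.Println(sql)
	// INSERT INTO autocomplete (value, type, project_id) VALUES ($1, $2, $3), ($4, $5, $6)
}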
|
|
|
|||
|
|
@ -34,7 +34,15 @@ type Conn struct {
|
|||
customEvents Bulk
|
||||
webPageEvents Bulk
|
||||
webInputEvents Bulk
|
||||
webGraphQLEvents Bulk
|
||||
webGraphQL Bulk
|
||||
webErrors Bulk
|
||||
webErrorEvents Bulk
|
||||
webErrorTags Bulk
|
||||
webIssues Bulk
|
||||
webIssueEvents Bulk
|
||||
webCustomEvents Bulk
|
||||
webClickEvents Bulk
|
||||
webNetworkRequest Bulk
|
||||
sessionUpdates map[uint64]*sessionUpdates
|
||||
batchQueueLimit int
|
||||
batchSizeLimit int
|
||||
|
|
@ -70,7 +78,7 @@ func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics)
|
|||
if err != nil {
|
||||
log.Fatalf("can't create new pool wrapper: %s", err)
|
||||
}
|
||||
conn.initBulks()
|
||||
conn.initBulks(metrics)
|
||||
return conn
|
||||
}
|
||||
|
||||
|
|
@ -99,58 +107,122 @@ func (conn *Conn) initMetrics(metrics *monitoring.Metrics) {
|
|||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) initBulks() {
|
||||
func (conn *Conn) initBulks(metrics *monitoring.Metrics) {
|
||||
var err error
|
||||
conn.autocompletes, err = NewBulk(conn.c,
|
||||
conn.autocompletes, err = NewBulk(conn.c, metrics,
|
||||
"autocomplete",
|
||||
"(value, type, project_id)",
|
||||
"($%d, $%d, $%d)",
|
||||
3, 100)
|
||||
3, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create autocomplete bulk")
|
||||
log.Fatalf("can't create autocomplete bulk: %s", err)
|
||||
}
|
||||
conn.requests, err = NewBulk(conn.c,
|
||||
conn.requests, err = NewBulk(conn.c, metrics,
|
||||
"events_common.requests",
|
||||
"(session_id, timestamp, seq_index, url, duration, success)",
|
||||
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
|
||||
6, 100)
|
||||
6, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create requests bulk")
|
||||
log.Fatalf("can't create requests bulk: %s", err)
|
||||
}
|
||||
conn.customEvents, err = NewBulk(conn.c,
|
||||
conn.customEvents, err = NewBulk(conn.c, metrics,
|
||||
"events_common.customs",
|
||||
"(session_id, timestamp, seq_index, name, payload)",
|
||||
"($%d, $%d, $%d, left($%d, 2700), $%d)",
|
||||
5, 100)
|
||||
5, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create customEvents bulk")
|
||||
log.Fatalf("can't create customEvents bulk: %s", err)
|
||||
}
|
||||
conn.webPageEvents, err = NewBulk(conn.c,
|
||||
conn.webPageEvents, err = NewBulk(conn.c, metrics,
|
||||
"events.pages",
|
||||
"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
|
||||
"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
|
||||
"time_to_interactive, response_time, dom_building_time)",
|
||||
"($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0),"+
|
||||
" NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0))",
|
||||
18, 100)
|
||||
18, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webPageEvents bulk")
|
||||
log.Fatalf("can't create webPageEvents bulk: %s", err)
|
||||
}
|
||||
conn.webInputEvents, err = NewBulk(conn.c,
|
||||
conn.webInputEvents, err = NewBulk(conn.c, metrics,
|
||||
"events.inputs",
|
||||
"(session_id, message_id, timestamp, value, label)",
|
||||
"($%d, $%d, $%d, $%d, NULLIF($%d,''))",
|
||||
5, 100)
|
||||
5, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webPageEvents bulk")
|
||||
log.Fatalf("can't create webPageEvents bulk: %s", err)
|
||||
}
|
||||
conn.webGraphQLEvents, err = NewBulk(conn.c,
|
||||
conn.webGraphQL, err = NewBulk(conn.c, metrics,
|
||||
"events.graphql",
|
||||
"(session_id, timestamp, message_id, name, request_body, response_body)",
|
||||
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
|
||||
6, 100)
|
||||
6, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webPageEvents bulk")
|
||||
log.Fatalf("can't create webPageEvents bulk: %s", err)
|
||||
}
|
||||
conn.webErrors, err = NewBulk(conn.c, metrics,
|
||||
"errors",
|
||||
"(error_id, project_id, source, name, message, payload)",
|
||||
"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
|
||||
6, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webErrors bulk: %s", err)
|
||||
}
|
||||
conn.webErrorEvents, err = NewBulk(conn.c, metrics,
|
||||
"events.errors",
|
||||
"(session_id, message_id, timestamp, error_id)",
|
||||
"($%d, $%d, $%d, $%d)",
|
||||
4, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webErrorEvents bulk: %s", err)
|
||||
}
|
||||
conn.webErrorTags, err = NewBulk(conn.c, metrics,
|
||||
"public.errors_tags",
|
||||
"(session_id, message_id, error_id, key, value)",
|
||||
"($%d, $%d, $%d, $%d, $%d)",
|
||||
5, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webErrorEvents bulk: %s", err)
|
||||
}
|
||||
conn.webIssues, err = NewBulk(conn.c, metrics,
|
||||
"issues",
|
||||
"(project_id, issue_id, type, context_string)",
|
||||
"($%d, $%d, $%d, $%d)",
|
||||
4, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webIssues bulk: %s", err)
|
||||
}
|
||||
conn.webIssueEvents, err = NewBulk(conn.c, metrics,
|
||||
"events_common.issues",
|
||||
"(session_id, issue_id, timestamp, seq_index, payload)",
|
||||
"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
|
||||
5, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webIssueEvents bulk: %s", err)
|
||||
}
|
||||
conn.webCustomEvents, err = NewBulk(conn.c, metrics,
|
||||
"events_common.customs",
|
||||
"(session_id, seq_index, timestamp, name, payload, level)",
|
||||
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
|
||||
6, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webCustomEvents bulk: %s", err)
|
||||
}
|
||||
conn.webClickEvents, err = NewBulk(conn.c, metrics,
|
||||
"events.clicks",
|
||||
"(session_id, message_id, timestamp, label, selector, url, path)",
|
||||
"($%d, $%d, $%d, NULLIF($%d, ''), $%d, $%d, $%d)",
|
||||
7, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webClickEvents bulk: %s", err)
|
||||
}
|
||||
conn.webNetworkRequest, err = NewBulk(conn.c, metrics,
|
||||
"events_common.requests",
|
||||
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)",
|
||||
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d, $%d, $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)",
|
||||
13, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create webNetworkRequest bulk: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -195,7 +267,14 @@ func (conn *Conn) updateSessionEvents(sessionID uint64, events, pages int) {
|
|||
if _, ok := conn.sessionUpdates[sessionID]; !ok {
|
||||
conn.sessionUpdates[sessionID] = NewSessionUpdates(sessionID)
|
||||
}
|
||||
conn.sessionUpdates[sessionID].add(pages, events)
|
||||
conn.sessionUpdates[sessionID].addEvents(pages, events)
|
||||
}
|
||||
|
||||
func (conn *Conn) updateSessionIssues(sessionID uint64, errors, issueScore int) {
|
||||
if _, ok := conn.sessionUpdates[sessionID]; !ok {
|
||||
conn.sessionUpdates[sessionID] = NewSessionUpdates(sessionID)
|
||||
}
|
||||
conn.sessionUpdates[sessionID].addIssues(errors, issueScore)
|
||||
}
|
||||
|
||||
func (conn *Conn) sendBulks() {
|
||||
|
|
@ -214,8 +293,32 @@ func (conn *Conn) sendBulks() {
|
|||
if err := conn.webInputEvents.Send(); err != nil {
|
||||
log.Printf("webInputEvents bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webGraphQLEvents.Send(); err != nil {
|
||||
log.Printf("webGraphQLEvents bulk send err: %s", err)
|
||||
if err := conn.webGraphQL.Send(); err != nil {
|
||||
log.Printf("webGraphQL bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webErrors.Send(); err != nil {
|
||||
log.Printf("webErrors bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webErrorEvents.Send(); err != nil {
|
||||
log.Printf("webErrorEvents bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webErrorTags.Send(); err != nil {
|
||||
log.Printf("webErrorTags bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webIssues.Send(); err != nil {
|
||||
log.Printf("webIssues bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webIssueEvents.Send(); err != nil {
|
||||
log.Printf("webIssueEvents bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webCustomEvents.Send(); err != nil {
|
||||
log.Printf("webCustomEvents bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webClickEvents.Send(); err != nil {
|
||||
log.Printf("webClickEvents bulk send err: %s", err)
|
||||
}
|
||||
if err := conn.webNetworkRequest.Send(); err != nil {
|
||||
log.Printf("webNetworkRequest bulk send err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -146,70 +146,24 @@ func (conn *Conn) InsertMetadata(sessionID uint64, keyNo uint, value string) err
|
|||
return conn.c.Exec(fmt.Sprintf(sqlRequest, keyNo), value, sessionID)
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) (err error) {
|
||||
tx, err := conn.c.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if rollbackErr := tx.rollback(); rollbackErr != nil {
|
||||
log.Printf("rollback err: %s", rollbackErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) error {
|
||||
issueID := hashid.IssueID(projectID, e)
|
||||
|
||||
// TEMP. TODO: nullable & json message field type
|
||||
payload := &e.Payload
|
||||
if *payload == "" || *payload == "{}" {
|
||||
payload = nil
|
||||
}
|
||||
|
||||
if err = tx.exec(`
|
||||
INSERT INTO issues (
|
||||
project_id, issue_id, type, context_string
|
||||
) (SELECT
|
||||
project_id, $2, $3, $4
|
||||
FROM sessions
|
||||
WHERE session_id = $1
|
||||
)ON CONFLICT DO NOTHING`,
|
||||
sessionID, issueID, e.Type, e.ContextString,
|
||||
); err != nil {
|
||||
return err
|
||||
if err := conn.webIssues.Append(projectID, issueID, e.Type, e.ContextString); err != nil {
|
||||
log.Printf("insert web issue err: %s", err)
|
||||
}
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events_common.issues (
|
||||
session_id, issue_id, timestamp, seq_index, payload
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, CAST($5 AS jsonb)
|
||||
)`,
|
||||
sessionID, issueID, e.Timestamp,
|
||||
truncSqIdx(e.MessageID),
|
||||
payload,
|
||||
); err != nil {
|
||||
return err
|
||||
if err := conn.webIssueEvents.Append(sessionID, issueID, e.Timestamp, truncSqIdx(e.MessageID), payload); err != nil {
|
||||
log.Printf("insert web issue event err: %s", err)
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET issue_score = issue_score + $2
|
||||
WHERE session_id = $1`,
|
||||
sessionID, getIssueScore(e),
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: no redundancy. Deliver to UI in a different way
|
||||
conn.updateSessionIssues(sessionID, 0, getIssueScore(e))
|
||||
if e.Type == "custom" {
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events_common.customs
|
||||
(session_id, seq_index, timestamp, name, payload, level)
|
||||
VALUES
|
||||
($1, $2, $3, left($4, 2700), $5, 'error')
|
||||
`,
|
||||
sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload,
|
||||
); err != nil {
|
||||
return err
|
||||
if err := conn.webCustomEvents.Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload, "error"); err != nil {
|
||||
log.Printf("insert web custom event err: %s", err)
|
||||
}
|
||||
}
|
||||
err = tx.commit()
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,9 +9,13 @@ import (
|
|||
)
|
||||
|
||||
func (conn *Conn) InsertWebCustomEvent(sessionID uint64, projectID uint32, e *CustomEvent) error {
|
||||
err := conn.InsertCustomEvent(sessionID, e.Timestamp,
|
||||
truncSqIdx(e.MessageID),
|
||||
e.Name, e.Payload)
|
||||
err := conn.InsertCustomEvent(
|
||||
sessionID,
|
||||
uint64(e.Meta().Timestamp),
|
||||
truncSqIdx(e.Meta().Index),
|
||||
e.Name,
|
||||
e.Payload,
|
||||
)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, projectID, "CUSTOM", e.Name)
|
||||
}
|
||||
|
|
@ -54,16 +58,12 @@ func (conn *Conn) InsertWebPageEvent(sessionID uint64, projectID uint32, e *Page
|
|||
}
|
||||
|
||||
func (conn *Conn) InsertWebClickEvent(sessionID uint64, projectID uint32, e *ClickEvent) error {
|
||||
sqlRequest := `
|
||||
INSERT INTO events.clicks
|
||||
(session_id, message_id, timestamp, label, selector, url)
|
||||
(SELECT
|
||||
$1, $2, $3, NULLIF($4, ''), $5, host || path
|
||||
FROM events.pages
|
||||
WHERE session_id = $1 AND timestamp <= $3 ORDER BY timestamp DESC LIMIT 1
|
||||
)
|
||||
`
|
||||
conn.batchQueue(sessionID, sqlRequest, sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Label, e.Selector)
|
||||
var host, path string
|
||||
host, path, _, _ = url.GetURLParts(e.Url)
|
||||
log.Println("insert web click:", host, path)
|
||||
if err := conn.webClickEvents.Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Label, e.Selector, host+path, path); err != nil {
|
||||
log.Printf("insert web click err: %s", err)
|
||||
}
|
||||
// Accumulate session updates and execute them inside the batch together with the other SQL commands
|
||||
conn.updateSessionEvents(sessionID, 1, 0)
|
||||
// Add new value set to autocomplete bulk
|
||||
|
|
@ -87,64 +87,24 @@ func (conn *Conn) InsertWebInputEvent(sessionID uint64, projectID uint32, e *Inp
|
|||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *types.ErrorEvent) (err error) {
|
||||
tx, err := conn.c.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if rollbackErr := tx.rollback(); rollbackErr != nil {
|
||||
log.Printf("rollback err: %s", rollbackErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *types.ErrorEvent) error {
|
||||
errorID := e.ID(projectID)
|
||||
|
||||
if err = tx.exec(`
|
||||
INSERT INTO errors
|
||||
(error_id, project_id, source, name, message, payload)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6::jsonb)
|
||||
ON CONFLICT DO NOTHING`,
|
||||
errorID, projectID, e.Source, e.Name, e.Message, e.Payload,
|
||||
); err != nil {
|
||||
return err
|
||||
if err := conn.webErrors.Append(errorID, projectID, e.Source, e.Name, e.Message, e.Payload); err != nil {
|
||||
log.Printf("insert web error err: %s", err)
|
||||
}
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events.errors
|
||||
(session_id, message_id, timestamp, error_id)
|
||||
VALUES
|
||||
($1, $2, $3, $4)
|
||||
`,
|
||||
sessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID,
|
||||
); err != nil {
|
||||
return err
|
||||
if err := conn.webErrorEvents.Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID); err != nil {
|
||||
log.Printf("insert web error event err: %s", err)
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET errors_count = errors_count + 1, issue_score = issue_score + 1000
|
||||
WHERE session_id = $1`,
|
||||
sessionID,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
err = tx.commit()
|
||||
|
||||
// Insert tags
|
||||
sqlRequest := `
|
||||
INSERT INTO public.errors_tags (
|
||||
session_id, message_id, error_id, key, value
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5
|
||||
) ON CONFLICT DO NOTHING`
|
||||
conn.updateSessionIssues(sessionID, 1, 1000)
|
||||
for key, value := range e.Tags {
|
||||
conn.batchQueue(sessionID, sqlRequest, sessionID, truncSqIdx(e.MessageID), errorID, key, value)
|
||||
if err := conn.webErrorTags.Append(sessionID, truncSqIdx(e.MessageID), errorID, key, value); err != nil {
|
||||
log.Printf("insert web error token err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebFetchEvent(sessionID uint64, projectID uint32, savePayload bool, e *FetchEvent) error {
|
||||
func (conn *Conn) InsertWebNetworkRequest(sessionID uint64, projectID uint32, savePayload bool, e *NetworkRequest) error {
|
||||
var request, response *string
|
||||
if savePayload {
|
||||
request = &e.Request
|
||||
|
|
@ -155,39 +115,18 @@ func (conn *Conn) InsertWebFetchEvent(sessionID uint64, projectID uint32, savePa
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sqlRequest := `
|
||||
INSERT INTO events_common.requests (
|
||||
session_id, timestamp, seq_index,
|
||||
url, host, path, query,
|
||||
request_body, response_body, status_code, method,
|
||||
duration, success
|
||||
) VALUES (
|
||||
$1, $2, $3,
|
||||
left($4, 2700), $5, $6, $7,
|
||||
$8, $9, $10::smallint, NULLIF($11, '')::http_method,
|
||||
$12, $13
|
||||
) ON CONFLICT DO NOTHING`
|
||||
conn.batchQueue(sessionID, sqlRequest,
|
||||
sessionID, e.Timestamp, truncSqIdx(e.MessageID),
|
||||
e.URL, host, path, query,
|
||||
request, response, e.Status, url.EnsureMethod(e.Method),
|
||||
e.Duration, e.Status < 400,
|
||||
)
|
||||
|
||||
// Record approximate message size
|
||||
conn.updateBatchSize(sessionID, len(sqlRequest)+len(e.URL)+len(host)+len(path)+len(query)+
|
||||
len(e.Request)+len(e.Response)+len(url.EnsureMethod(e.Method))+8*5+1)
|
||||
conn.webNetworkRequest.Append(sessionID, e.Meta().Timestamp, truncSqIdx(e.Meta().Index), e.URL, host, path, query,
|
||||
request, response, e.Status, url.EnsureMethod(e.Method), e.Duration, e.Status < 400)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebGraphQLEvent(sessionID uint64, projectID uint32, savePayload bool, e *GraphQLEvent) error {
|
||||
func (conn *Conn) InsertWebGraphQL(sessionID uint64, projectID uint32, savePayload bool, e *GraphQL) error {
|
||||
var request, response *string
|
||||
if savePayload {
|
||||
request = &e.Variables
|
||||
response = &e.Response
|
||||
}
|
||||
if err := conn.webGraphQLEvents.Append(sessionID, e.Timestamp, truncSqIdx(e.MessageID), e.OperationName, request, response); err != nil {
|
||||
if err := conn.webGraphQL.Append(sessionID, e.Meta().Timestamp, truncSqIdx(e.Meta().Index), e.OperationName, request, response); err != nil {
|
||||
log.Printf("insert web graphQL event err: %s", err)
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, projectID, "GRAPHQL", e.OperationName)
|
||||
|
|
|
|||
|
|
@ -1,12 +1,14 @@
|
|||
package postgres
|
||||
|
||||
// Mechanism for combining several session updates into one
|
||||
const sessionUpdateReq = `UPDATE sessions SET pages_count = pages_count + $1, events_count = events_count + $2 WHERE session_id = $3`
|
||||
const sessionUpdateReq = `UPDATE sessions SET pages_count = pages_count + $1, events_count = events_count + $2, errors_count = errors_count + $3, issue_score = issue_score + $4 WHERE session_id = $5`
|
||||
|
||||
type sessionUpdates struct {
|
||||
sessionID uint64
|
||||
pages int
|
||||
events int
|
||||
errors int
|
||||
issues int
|
||||
}
|
||||
|
||||
func NewSessionUpdates(sessionID uint64) *sessionUpdates {
|
||||
|
|
@ -14,17 +16,24 @@ func NewSessionUpdates(sessionID uint64) *sessionUpdates {
|
|||
sessionID: sessionID,
|
||||
pages: 0,
|
||||
events: 0,
|
||||
errors: 0,
|
||||
issues: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (su *sessionUpdates) add(pages, events int) {
|
||||
func (su *sessionUpdates) addEvents(pages, events int) {
|
||||
su.pages += pages
|
||||
su.events += events
|
||||
}
|
||||
|
||||
func (su *sessionUpdates) addIssues(errors, issues int) {
|
||||
su.errors += errors
|
||||
su.issues += issues
|
||||
}
|
||||
|
||||
func (su *sessionUpdates) request() (string, []interface{}) {
|
||||
if su.pages == 0 && su.events == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return sessionUpdateReq, []interface{}{su.pages, su.events, su.sessionID}
|
||||
return sessionUpdateReq, []interface{}{su.pages, su.events, su.errors, su.issues, su.sessionID}
|
||||
}
|
||||
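The session-updates change above accumulates per-session counters in memory and flushes them later as a single UPDATE instead of issuing one statement per event. A minimal sketch of that accumulation, with the column and parameter order mirroring sessionUpdateReq from the diff; the struct name and the emptiness check covering all four counters are illustrative choices, not the repository's code.

package main

import "fmt"

type sessionDelta struct {
	sessionID uint64
	pages     int
	events    int
	errors    int
	issues    int
}

// request builds one combined UPDATE for everything accumulated so far.
func (d *sessionDelta) request() (string, []interface{}) {
	if d.pages == 0 && d.events == 0 && d.errors == 0 && d.issues == 0 {
		return "", nil
	}
	return "UPDATE sessions SET pages_count = pages_count + $1, events_count = events_count + $2, " +
			"errors_count = errors_count + $3, issue_score = issue_score + $4 WHERE session_id = $5",
		[]interface{}{d.pages, d.events, d.errors, d.issues, d.sessionID}
}

func main() {
	d := &sessionDelta{sessionID: 42}
	d.pages, d.events = d.pages+1, d.events+3      // like addEvents(1, 3)
	d.errors, d.issues = d.errors+1, d.issues+1000 // like addIssues(1, 1000)
	sql, args := d.request()
	fmt.Println(sql, args)
}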
|
|
|
|||
|
|
@ -69,13 +69,6 @@ func (b *EventMapper) Handle(message Message, messageID uint64, timestamp uint64
|
|||
Type: getResourceType(msg.Initiator, msg.URL),
|
||||
Success: msg.Duration != 0,
|
||||
}
|
||||
case *RawCustomEvent:
|
||||
return &CustomEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Name: msg.Name,
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
case *CustomIssue:
|
||||
return &IssueEvent{
|
||||
Type: "custom",
|
||||
|
|
@ -84,32 +77,6 @@ func (b *EventMapper) Handle(message Message, messageID uint64, timestamp uint64
|
|||
ContextString: msg.Name,
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
case *Fetch:
|
||||
return &FetchEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: msg.Timestamp,
|
||||
Method: msg.Method,
|
||||
URL: msg.URL,
|
||||
Request: msg.Request,
|
||||
Response: msg.Response,
|
||||
Status: msg.Status,
|
||||
Duration: msg.Duration,
|
||||
}
|
||||
case *GraphQL:
|
||||
return &GraphQLEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
OperationKind: msg.OperationKind,
|
||||
OperationName: msg.OperationName,
|
||||
Variables: msg.Variables,
|
||||
Response: msg.Response,
|
||||
}
|
||||
case *StateAction:
|
||||
return &StateActionEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Type: msg.Type,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
|
|||
/*
|
||||
Handler name: NetworkIssue
|
||||
Input events: ResourceTiming,
|
||||
Fetch
|
||||
NetworkRequest
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
|
|
@ -19,21 +19,7 @@ func (f *NetworkIssueDetector) Build() Message {
|
|||
|
||||
func (f *NetworkIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
// case *ResourceTiming:
|
||||
// success := msg.Duration != 0 // The only available way here
|
||||
// if !success {
|
||||
// issueType := "missing_resource"
|
||||
// if msg.Initiator == "fetch" || msg.Initiator == "xmlhttprequest" {
|
||||
// issueType = "bad_request"
|
||||
// }
|
||||
// return &IssueEvent{
|
||||
// Type: issueType,
|
||||
// MessageID: messageID,
|
||||
// Timestamp: msg.Timestamp,
|
||||
// ContextString: msg.URL,
|
||||
// }
|
||||
// }
|
||||
case *Fetch:
|
||||
case *NetworkRequest:
|
||||
if msg.Status >= 400 {
|
||||
return &IssueEvent{
|
||||
Type: "bad_request",
|
||||
|
|
|
|||
|
|
@ -1,79 +0,0 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type partitionStats struct {
|
||||
maxts int64
|
||||
mints int64
|
||||
lastts int64
|
||||
lastID uint64
|
||||
count int
|
||||
}
|
||||
|
||||
// Update partition statistic
|
||||
func (prt *partitionStats) update(m *messages.BatchInfo) {
|
||||
if prt.maxts < m.Timestamp() {
|
||||
prt.maxts = m.Timestamp()
|
||||
}
|
||||
if prt.mints > m.Timestamp() || prt.mints == 0 {
|
||||
prt.mints = m.Timestamp()
|
||||
}
|
||||
prt.lastts = m.Timestamp()
|
||||
prt.lastID = m.ID()
|
||||
prt.count += 1
|
||||
}
|
||||
|
||||
type queueStats struct {
|
||||
prts map[int32]*partitionStats
|
||||
tick <-chan time.Time
|
||||
}
|
||||
|
||||
type QueueStats interface {
|
||||
Collect(msg messages.Message)
|
||||
}
|
||||
|
||||
func NewQueueStats(sec int) *queueStats {
|
||||
return &queueStats{
|
||||
prts: make(map[int32]*partitionStats),
|
||||
tick: time.Tick(time.Duration(sec) * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
// Collect writes new data to partition statistic
|
||||
func (qs *queueStats) Collect(msg messages.Message) {
|
||||
prti := int32(msg.SessionID() % 16) // TODO use GetKeyPartition from kafka/key.go
|
||||
prt, ok := qs.prts[prti]
|
||||
if !ok {
|
||||
qs.prts[prti] = &partitionStats{}
|
||||
prt = qs.prts[prti]
|
||||
}
|
||||
prt.update(msg.Meta().Batch())
|
||||
|
||||
select {
|
||||
case <-qs.tick:
|
||||
qs.log()
|
||||
qs.reset()
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Print to console collected statistics
|
||||
func (qs *queueStats) log() {
|
||||
s := "Queue Statistics: "
|
||||
for i, p := range qs.prts {
|
||||
s = fmt.Sprintf("%v | %v:: lastTS %v, lastID %v, count %v, maxTS %v, minTS %v",
|
||||
s, i, p.lastts, p.lastID, p.count, p.maxts, p.mints)
|
||||
}
|
||||
log.Println(s)
|
||||
}
|
||||
|
||||
// Clear all queue partitions
|
||||
func (qs *queueStats) reset() {
|
||||
qs.prts = make(map[int32]*partitionStats)
|
||||
}
|
||||
119
backend/pkg/messages/bytes.go
Normal file
|
|
@ -0,0 +1,119 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type BytesReader interface {
|
||||
ReadSize() (uint64, error)
|
||||
ReadByte() (byte, error)
|
||||
ReadUint() (uint64, error)
|
||||
ReadInt() (int64, error)
|
||||
ReadBoolean() (bool, error)
|
||||
ReadString() (string, error)
|
||||
Data() []byte
|
||||
Pointer() int64
|
||||
SetPointer(p int64)
|
||||
}
|
||||
|
||||
type bytesReaderImpl struct {
|
||||
data []byte
|
||||
curr int64
|
||||
}
|
||||
|
||||
func NewBytesReader(data []byte) BytesReader {
|
||||
return &bytesReaderImpl{
|
||||
data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadSize() (uint64, error) {
|
||||
if len(m.data)-int(m.curr) < 3 {
|
||||
return 0, fmt.Errorf("out of range")
|
||||
}
|
||||
var size uint64
|
||||
for i, b := range m.data[m.curr : m.curr+3] {
|
||||
size += uint64(b) << (8 * i)
|
||||
}
|
||||
m.curr += 3
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadByte() (byte, error) {
|
||||
if int(m.curr) >= len(m.data) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
m.curr++
|
||||
return m.data[m.curr-1], nil
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadUint() (uint64, error) {
|
||||
var x uint64
|
||||
var s uint
|
||||
i := 0
|
||||
for {
|
||||
b, err := m.ReadByte()
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
if b < 0x80 {
|
||||
if i > 9 || i == 9 && b > 1 {
|
||||
return x, errors.New("uint overflow")
|
||||
}
|
||||
return x | uint64(b)<<s, nil
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadInt() (int64, error) {
|
||||
ux, err := m.ReadUint()
|
||||
x := int64(ux >> 1)
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
if ux&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x, err
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadBoolean() (bool, error) {
|
||||
val, err := m.ReadByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return val == 1, nil
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) ReadString() (string, error) {
|
||||
l, err := m.ReadUint()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if l > 10e6 {
|
||||
return "", errors.New("too long string")
|
||||
}
|
||||
if len(m.data)-int(m.curr) < int(l) {
|
||||
return "", fmt.Errorf("out of range")
|
||||
}
|
||||
str := string(m.data[m.curr : int(m.curr)+int(l)])
|
||||
m.curr += int64(l)
|
||||
return str, nil
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) Data() []byte {
|
||||
return m.data
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) Pointer() int64 {
|
||||
return m.curr
|
||||
}
|
||||
|
||||
func (m *bytesReaderImpl) SetPointer(p int64) {
|
||||
m.curr = p
|
||||
}
|
||||
22
backend/pkg/messages/cache.go
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
package messages
|
||||
|
||||
type pageLocations struct {
|
||||
urls map[uint64]string
|
||||
}
|
||||
|
||||
func NewPageLocations() *pageLocations {
|
||||
return &pageLocations{urls: make(map[uint64]string)}
|
||||
}
|
||||
|
||||
func (p *pageLocations) Set(sessID uint64, url string) {
|
||||
p.urls[sessID] = url
|
||||
}
|
||||
|
||||
func (p *pageLocations) Get(sessID uint64) string {
|
||||
url := p.urls[sessID]
|
||||
return url
|
||||
}
|
||||
|
||||
func (p *pageLocations) Delete(sessID uint64) {
|
||||
delete(p.urls, sessID)
|
||||
}
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
package messages
|
||||
|
||||
func IsReplayerType(id int) bool {
|
||||
return 80 != id && 81 != id && 82 != id && 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 33 != id && 35 != id && 36 != id && 42 != id && 43 != id && 50 != id && 51 != id && 52 != id && 53 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 126 != id && 127 != id && 107 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 99 != id && 101 != id && 104 != id && 110 != id && 111 != id
|
||||
return 80 != id && 81 != id && 82 != id && 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 33 != id && 35 != id && 42 != id && 52 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 126 != id && 127 != id && 107 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 99 != id && 101 != id && 104 != id && 110 != id && 111 != id
|
||||
}
|
||||
|
||||
func IsIOSType(id int) bool {
|
||||
|
|
|
|||
|
|
@ -1,9 +1,7 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
|
|
@ -26,10 +24,15 @@ type messageIteratorImpl struct {
|
|||
broken bool
|
||||
messageInfo *message
|
||||
batchInfo *BatchInfo
|
||||
urls *pageLocations
|
||||
}
|
||||
|
||||
func NewMessageIterator(messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
|
||||
iter := &messageIteratorImpl{handler: messageHandler, autoDecode: autoDecode}
|
||||
iter := &messageIteratorImpl{
|
||||
handler: messageHandler,
|
||||
autoDecode: autoDecode,
|
||||
urls: NewPageLocations(),
|
||||
}
|
||||
if len(messageFilter) != 0 {
|
||||
filter := make(map[int]struct{}, len(messageFilter))
|
||||
for _, msgType := range messageFilter {
|
||||
|
|
@ -54,76 +57,32 @@ func (i *messageIteratorImpl) prepareVars(batchInfo *BatchInfo) {
|
|||
}
|
||||
|
||||
func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
|
||||
// Create new message reader
|
||||
reader := NewMessageReader(batchData)
|
||||
|
||||
// Pre-decode batch data
|
||||
if err := reader.Parse(); err != nil {
|
||||
log.Printf("pre-decode batch err: %s, info: %s", err, batchInfo.Info())
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare iterator before processing messages in batch
|
||||
i.prepareVars(batchInfo)
|
||||
|
||||
// Initialize batch reader
|
||||
reader := bytes.NewReader(batchData)
|
||||
|
||||
// Process until end of batch or parsing error
|
||||
for {
|
||||
for reader.Next() {
|
||||
// Increase message index (can be overwritten by batch info message)
|
||||
i.messageInfo.Index++
|
||||
|
||||
if i.broken {
|
||||
log.Printf("skipping broken batch, info: %s", i.batchInfo.Info())
|
||||
return
|
||||
}
|
||||
|
||||
if i.canSkip {
|
||||
if _, err := reader.Seek(int64(i.size), io.SeekCurrent); err != nil {
|
||||
log.Printf("can't skip message: %s, info: %s", err, i.batchInfo.Info())
|
||||
return
|
||||
}
|
||||
}
|
||||
i.canSkip = false
|
||||
|
||||
// Read message type
|
||||
msgType, err := ReadUint(reader)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
log.Printf("can't read message type: %s, info: %s", err, i.batchInfo.Info())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var msg Message
|
||||
// Read message body (and decode if protocol version less than 1)
|
||||
if i.version > 0 && messageHasSize(msgType) {
|
||||
// Read message size if it is a new protocol version
|
||||
i.size, err = ReadSize(reader)
|
||||
if err != nil {
|
||||
log.Printf("can't read message size: %s, info: %s", err, i.batchInfo.Info())
|
||||
return
|
||||
}
|
||||
msg = &RawMessage{
|
||||
tp: msgType,
|
||||
size: i.size,
|
||||
reader: reader,
|
||||
raw: batchData,
|
||||
skipped: &i.canSkip,
|
||||
broken: &i.broken,
|
||||
meta: i.messageInfo,
|
||||
}
|
||||
i.canSkip = true
|
||||
} else {
|
||||
msg, err = ReadMessage(msgType, reader)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
log.Printf("can't read message body: %s, info: %s", err, i.batchInfo.Info())
|
||||
}
|
||||
return
|
||||
}
|
||||
msg = transformDeprecated(msg)
|
||||
}
|
||||
msg := reader.Message()
|
||||
|
||||
// Preprocess "system" messages
|
||||
if _, ok := i.preFilter[msg.TypeID()]; ok {
|
||||
msg = msg.Decode()
|
||||
if msg == nil {
|
||||
log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
|
||||
log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
|
||||
return
|
||||
}
|
||||
msg = transformDeprecated(msg)
|
||||
if err := i.preprocessing(msg); err != nil {
|
||||
log.Printf("message preprocessing err: %s", err)
|
||||
return
|
||||
|
|
@ -140,7 +99,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
|
|||
if i.autoDecode {
|
||||
msg = msg.Decode()
|
||||
if msg == nil {
|
||||
log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
|
||||
log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
@ -171,7 +130,7 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
|
|||
if m.Timestamp == 0 {
|
||||
i.zeroTsLog("BatchMetadata")
|
||||
}
|
||||
i.messageInfo.Url = m.Url
|
||||
i.messageInfo.Url = m.Location
|
||||
i.version = m.Version
|
||||
i.batchInfo.version = m.Version
|
||||
|
||||
|
|
@ -184,6 +143,10 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
|
|||
if m.Timestamp == 0 {
|
||||
i.zeroTsLog("BatchMeta")
|
||||
}
|
||||
// Try to get saved session's page url
|
||||
if savedURL := i.urls.Get(i.messageInfo.batch.sessionID); savedURL != "" {
|
||||
i.messageInfo.Url = savedURL
|
||||
}
|
||||
|
||||
case *Timestamp:
|
||||
i.messageInfo.Timestamp = int64(m.Timestamp)
|
||||
|
|
@ -204,9 +167,13 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
|
|||
if m.Timestamp == 0 {
|
||||
i.zeroTsLog("SessionEnd")
|
||||
}
|
||||
// Delete session from urls cache layer
|
||||
i.urls.Delete(i.messageInfo.batch.sessionID)
|
||||
|
||||
case *SetPageLocation:
|
||||
i.messageInfo.Url = m.URL
|
||||
// Save session page url in cache for use in the next batches
|
||||
i.urls.Set(i.messageInfo.batch.sessionID, m.URL)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,6 +14,17 @@ func transformDeprecated(msg Message) Message {
|
|||
Timestamp: m.Timestamp,
|
||||
EncryptionKey: "",
|
||||
}
|
||||
case *Fetch:
|
||||
return &NetworkRequest{
|
||||
Type: "fetch",
|
||||
Method: m.Method,
|
||||
URL: m.URL,
|
||||
Request: m.Request,
|
||||
Response: m.Response,
|
||||
Status: m.Status,
|
||||
Timestamp: m.Timestamp,
|
||||
Duration: m.Duration,
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import "fmt"
|
|||
|
||||
type Message interface {
|
||||
Encode() []byte
|
||||
EncodeWithIndex() []byte
|
||||
Decode() Message
|
||||
TypeID() int
|
||||
Meta() *message
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -1,11 +1,9 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -21,19 +19,6 @@ func ReadByte(reader io.Reader) (byte, error) {
|
|||
return one[0], nil
|
||||
}
|
||||
|
||||
func ReadData(reader io.Reader) ([]byte, error) {
|
||||
n, err := ReadUint(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := make([]byte, n)
|
||||
_, err = io.ReadFull(reader, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func ReadUint(reader io.Reader) (uint64, error) {
|
||||
var x uint64
|
||||
var s uint
|
||||
|
|
@ -55,6 +40,16 @@ func ReadUint(reader io.Reader) (uint64, error) {
|
|||
}
|
||||
}
|
||||
|
||||
func WriteUint(v uint64, buf []byte, p int) int {
|
||||
for v >= 0x80 {
|
||||
buf[p] = byte(v) | 0x80
|
||||
v >>= 7
|
||||
p++
|
||||
}
|
||||
buf[p] = byte(v)
|
||||
return p + 1
|
||||
}
|
||||
|
||||
func ReadInt(reader io.Reader) (int64, error) {
|
||||
ux, err := ReadUint(reader)
|
||||
x := int64(ux >> 1)
|
||||
|
|
@ -67,6 +62,14 @@ func ReadInt(reader io.Reader) (int64, error) {
|
|||
return x, err
|
||||
}
|
||||
|
||||
func WriteInt(v int64, buf []byte, p int) int {
|
||||
uv := uint64(v) << 1
|
||||
if v < 0 {
|
||||
uv = ^uv
|
||||
}
|
||||
return WriteUint(uv, buf, p)
|
||||
}
|
||||
|
||||
func ReadBoolean(reader io.Reader) (bool, error) {
|
||||
p := make([]byte, 1)
|
||||
_, err := io.ReadFull(reader, p)
|
||||
|
|
@ -76,6 +79,15 @@ func ReadBoolean(reader io.Reader) (bool, error) {
|
|||
return p[0] == 1, nil
|
||||
}
|
||||
|
||||
func WriteBoolean(v bool, buf []byte, p int) int {
|
||||
if v {
|
||||
buf[p] = 1
|
||||
} else {
|
||||
buf[p] = 0
|
||||
}
|
||||
return p + 1
|
||||
}
|
||||
|
||||
func ReadString(reader io.Reader) (string, error) {
|
||||
l, err := ReadUint(reader)
|
||||
if err != nil {
|
||||
|
|
@ -92,73 +104,11 @@ func ReadString(reader io.Reader) (string, error) {
|
|||
return string(buf), nil
|
||||
}
|
||||
|
||||
func ReadJson(reader io.Reader) (interface{}, error) {
|
||||
jsonData, err := ReadData(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var v interface{}
|
||||
if err = json.Unmarshal(jsonData, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func WriteUint(v uint64, buf []byte, p int) int {
|
||||
for v >= 0x80 {
|
||||
buf[p] = byte(v) | 0x80
|
||||
v >>= 7
|
||||
p++
|
||||
}
|
||||
buf[p] = byte(v)
|
||||
return p + 1
|
||||
}
|
||||
|
||||
func WriteInt(v int64, buf []byte, p int) int {
|
||||
uv := uint64(v) << 1
|
||||
if v < 0 {
|
||||
uv = ^uv
|
||||
}
|
||||
return WriteUint(uv, buf, p)
|
||||
}
|
||||
|
||||
func WriteBoolean(v bool, buf []byte, p int) int {
|
||||
if v {
|
||||
buf[p] = 1
|
||||
} else {
|
||||
buf[p] = 0
|
||||
}
|
||||
return p + 1
|
||||
}
|
||||
|
||||
func WriteString(str string, buf []byte, p int) int {
|
||||
p = WriteUint(uint64(len(str)), buf, p)
|
||||
return p + copy(buf[p:], str)
|
||||
}
|
||||
|
||||
func WriteData(data []byte, buf []byte, p int) int {
|
||||
p = WriteUint(uint64(len(data)), buf, p)
|
||||
return p + copy(buf[p:], data)
|
||||
}
|
||||
|
||||
func WriteJson(v interface{}, buf []byte, p int) int {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
log.Printf("JSON encoding error: %v", err)
|
||||
return WriteString("null", buf, p)
|
||||
}
|
||||
return WriteData(data, buf, p)
|
||||
}
|
||||
|
||||
func WriteSize(size uint64, buf []byte, p int) {
|
||||
var m uint64 = 255
|
||||
for i := 0; i < 3; i++ {
|
||||
buf[p+i] = byte(size & m)
|
||||
size = size >> 8
|
||||
}
|
||||
fmt.Println(buf)
|
||||
}
|
||||
|
||||
func ReadSize(reader io.Reader) (uint64, error) {
|
||||
n, err := io.ReadFull(reader, three)
|
||||
if err != nil {
|
||||
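For clarity, here is a self-contained round-trip sketch of the two encodings used by the helpers above: the 7-bit varint with a continuation bit (WriteUint/ReadUint) and the fixed 3-byte little-endian length used for message sizes (WriteSize/ReadSize). The overflow and bounds checks of the originals are omitted here, and the lower-case helper names are just for the sketch.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeUint emits 7 bits per byte, setting the high bit while more bytes follow.
func writeUint(v uint64, buf []byte, p int) int {
	for v >= 0x80 {
		buf[p] = byte(v) | 0x80 // continuation bit
		v >>= 7
		p++
	}
	buf[p] = byte(v)
	return p + 1
}

// readUint reassembles the 7-bit groups in little-endian order.
func readUint(r io.ByteReader) (uint64, error) {
	var x uint64
	var s uint
	for {
		b, err := r.ReadByte()
		if err != nil {
			return x, err
		}
		if b < 0x80 {
			return x | uint64(b)<<s, nil
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
}

// writeSize stores a size as exactly 3 bytes, least significant byte first.
func writeSize(size uint64, buf []byte, p int) {
	for i := 0; i < 3; i++ {
		buf[p+i] = byte(size & 0xff)
		size >>= 8
	}
}

func main() {
	buf := make([]byte, 16)
	n := writeUint(300, buf, 0)
	v, _ := readUint(bytes.NewReader(buf[:n]))
	fmt.Println(v) // 300

	writeSize(70000, buf, 0)
	fmt.Println(uint64(buf[0]) | uint64(buf[1])<<8 | uint64(buf[2])<<16) // 70000
}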
|
|
|
|||
|
|
@ -1,75 +1,23 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// RawMessage is a not decoded message
|
||||
type RawMessage struct {
|
||||
tp uint64
|
||||
size uint64
|
||||
data []byte
|
||||
reader *bytes.Reader
|
||||
raw []byte
|
||||
meta *message
|
||||
encoded bool
|
||||
skipped *bool
|
||||
broken *bool
|
||||
tp uint64
|
||||
data []byte
|
||||
broken *bool
|
||||
meta *message
|
||||
}
|
||||
|
||||
func (m *RawMessage) Encode() []byte {
|
||||
if m.encoded {
|
||||
return m.data
|
||||
}
|
||||
// Try to avoid EOF error
|
||||
if m.reader.Len() < int(m.size) {
|
||||
return nil
|
||||
}
|
||||
// Get current batch position
|
||||
currPos, err := m.reader.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
log.Printf("can't get current batch position: %s", err)
|
||||
return nil
|
||||
}
|
||||
// "Move" message type
|
||||
if currPos == 0 {
|
||||
log.Printf("can't move message type, curr position = %d", currPos)
|
||||
return nil
|
||||
}
|
||||
// Dirty hack to avoid extra memory allocation
|
||||
m.raw[currPos-1] = uint8(m.tp)
|
||||
m.data = m.raw[currPos-1 : currPos+int64(m.size)]
|
||||
m.encoded = true
|
||||
return m.data
|
||||
}
|
||||
|
||||
func (m *RawMessage) EncodeWithIndex() []byte {
|
||||
if !m.encoded {
|
||||
if m.Encode() == nil {
|
||||
*m.broken = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if IsIOSType(int(m.tp)) {
|
||||
return m.data
|
||||
}
|
||||
data := make([]byte, len(m.data)+8)
|
||||
copy(data[8:], m.data[:])
|
||||
binary.LittleEndian.PutUint64(data[0:], m.Meta().Index)
|
||||
return data
|
||||
}
|
||||
|
||||
func (m *RawMessage) Decode() Message {
|
||||
if !m.encoded {
|
||||
if m.Encode() == nil {
|
||||
*m.broken = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
msg, err := ReadMessage(m.tp, bytes.NewReader(m.data[1:]))
|
||||
msg, err := ReadMessage(m.tp, NewBytesReader(m.data[1:]))
|
||||
if err != nil {
|
||||
log.Printf("decode err: %s", err)
|
||||
*m.broken = true
|
||||
|
|
|
|||
File diff suppressed because it is too large
166
backend/pkg/messages/reader.go
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type MessageReader interface {
|
||||
Parse() (err error)
|
||||
Next() bool
|
||||
Message() Message
|
||||
}
|
||||
|
||||
func NewMessageReader(data []byte) MessageReader {
|
||||
return &messageReaderImpl{
|
||||
data: data,
|
||||
reader: NewBytesReader(data),
|
||||
list: make([]*MessageMeta, 0, 1024),
|
||||
}
|
||||
}
|
||||
|
||||
type MessageMeta struct {
|
||||
msgType uint64
|
||||
msgSize uint64
|
||||
msgFrom uint64
|
||||
}
|
||||
|
||||
type messageReaderImpl struct {
|
||||
data []byte
|
||||
reader BytesReader
|
||||
msgType uint64
|
||||
msgSize uint64
|
||||
msgBody []byte
|
||||
version int
|
||||
broken bool
|
||||
message Message
|
||||
err error
|
||||
list []*MessageMeta
|
||||
listPtr int
|
||||
}
|
||||
|
||||
func (m *messageReaderImpl) Parse() (err error) {
|
||||
m.listPtr = 0
|
||||
m.list = m.list[:0]
|
||||
m.broken = false
|
||||
for {
|
||||
// Try to read and decode the message type and size, and check that they are within range
|
||||
m.msgType, err = m.reader.ReadUint()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return fmt.Errorf("read message type err: %s", err)
|
||||
}
|
||||
// Reached the end of batch
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read message body (and decode if protocol version less than 1)
|
||||
if m.version > 0 && messageHasSize(m.msgType) {
|
||||
// Read message size if it is a new protocol version
|
||||
m.msgSize, err = m.reader.ReadSize()
|
||||
if err != nil {
|
||||
return fmt.Errorf("read message size err: %s", err)
|
||||
}
|
||||
|
||||
// Try to avoid EOF error
|
||||
|
||||
curr := m.reader.Pointer()
|
||||
if len(m.data)-int(curr) < int(m.msgSize) {
|
||||
return fmt.Errorf("can't read message body")
|
||||
}
|
||||
|
||||
// Dirty hack to avoid extra memory allocation
|
||||
m.data[curr-1] = uint8(m.msgType)
|
||||
|
||||
// Add message meta to list
|
||||
m.list = append(m.list, &MessageMeta{
|
||||
msgType: m.msgType,
|
||||
msgSize: m.msgSize + 1,
|
||||
msgFrom: uint64(curr - 1),
|
||||
})
|
||||
|
||||
// Update data pointer
|
||||
m.reader.SetPointer(curr + int64(m.msgSize))
|
||||
} else {
|
||||
from := m.reader.Pointer() - 1
|
||||
msg, err := ReadMessage(m.msgType, m.reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read message err: %s", err)
|
||||
}
|
||||
if m.msgType == MsgBatchMeta || m.msgType == MsgBatchMetadata {
|
||||
if len(m.list) > 0 {
|
||||
return fmt.Errorf("batch meta not at the start of batch")
|
||||
}
|
||||
switch message := msg.(type) {
|
||||
case *BatchMetadata:
|
||||
m.version = int(message.Version)
|
||||
case *BatchMeta:
|
||||
m.version = 0
|
||||
}
|
||||
if m.version != 1 {
|
||||
// Unsupported tracker version, reset reader
|
||||
m.list = m.list[:0]
|
||||
m.reader.SetPointer(0)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Add message meta to list
|
||||
m.list = append(m.list, &MessageMeta{
|
||||
msgType: m.msgType,
|
||||
msgSize: uint64(m.reader.Pointer() - from),
|
||||
msgFrom: uint64(from),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *messageReaderImpl) Next() bool {
|
||||
if m.broken {
|
||||
return false
|
||||
}
|
||||
|
||||
// For new version of tracker
|
||||
if len(m.list) > 0 {
|
||||
if m.listPtr >= len(m.list) {
|
||||
return false
|
||||
}
|
||||
|
||||
meta := m.list[m.listPtr]
|
||||
m.listPtr++
|
||||
m.message = &RawMessage{
|
||||
tp: meta.msgType,
|
||||
data: m.data[meta.msgFrom : meta.msgFrom+meta.msgSize],
|
||||
broken: &m.broken,
|
||||
meta: &message{},
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// For prev version of tracker
|
||||
var msg Message
|
||||
var err error
|
||||
|
||||
// Try to read and decode the message type and size, and check that they are within range
|
||||
m.msgType, err = m.reader.ReadUint()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
m.err = fmt.Errorf("read message type err: %s", err)
|
||||
}
|
||||
// Reached the end of batch
|
||||
return false
|
||||
}
|
||||
|
||||
// Read and decode message
|
||||
msg, err = ReadMessage(m.msgType, m.reader)
|
||||
if err != nil {
|
||||
m.err = fmt.Errorf("read message err: %s", err)
|
||||
return false
|
||||
}
|
||||
m.message = msg
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *messageReaderImpl) Message() Message {
|
||||
return m.message
|
||||
}
|
||||
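As a usage note, the new reader is two-pass: Parse pre-scans the batch and records each message's type, size and offset, then Next/Message iterate lazily over RawMessages that are decoded only on demand. The sketch below (not part of the diff) shows how a consumer inside the messages package could drive it, assuming the MessageReader and Message interfaces shown above plus the standard log package; it mirrors the calls made by the iterator change earlier in this commit.

// iterateBatch is an illustrative helper, not repository code.
func iterateBatch(batchData []byte) {
	reader := NewMessageReader(batchData)
	if err := reader.Parse(); err != nil {
		log.Printf("pre-decode batch err: %s", err)
		return
	}
	for reader.Next() {
		msg := reader.Message()
		if decoded := msg.Decode(); decoded != nil { // decode only when the payload is needed
			log.Printf("got message type %d", decoded.TypeID())
		}
	}
}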
|
|
@ -38,7 +38,7 @@ func New(name string) *Metrics {
|
|||
// initPrometheusDataExporter allows to use collected metrics in prometheus
|
||||
func (m *Metrics) initPrometheusDataExporter() {
|
||||
config := prometheus.Config{
|
||||
DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},
|
||||
DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000},
|
||||
}
|
||||
c := controller.New(
|
||||
processor.NewFactory(
|
||||
|
|
@ -76,8 +76,8 @@ Counter is a synchronous instrument that measures additive non-decreasing values
|
|||
*/
|
||||
|
||||
func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) {
|
||||
if _, ok := m.counters[name]; ok {
|
||||
return nil, fmt.Errorf("counter %s already exists", name)
|
||||
if counter, ok := m.counters[name]; ok {
|
||||
return counter, nil
|
||||
}
|
||||
counter, err := m.meter.SyncFloat64().Counter(name)
|
||||
if err != nil {
|
||||
|
|
@ -100,8 +100,8 @@ for example, the number of:
|
|||
*/
|
||||
|
||||
func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) {
|
||||
if _, ok := m.upDownCounters[name]; ok {
|
||||
return nil, fmt.Errorf("upDownCounter %s already exists", name)
|
||||
if counter, ok := m.upDownCounters[name]; ok {
|
||||
return counter, nil
|
||||
}
|
||||
counter, err := m.meter.SyncFloat64().UpDownCounter(name)
|
||||
if err != nil {
|
||||
|
|
@ -122,8 +122,8 @@ Histogram is a synchronous instrument that produces a histogram from recorded va
|
|||
*/
|
||||
|
||||
func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) {
|
||||
if _, ok := m.histograms[name]; ok {
|
||||
return nil, fmt.Errorf("histogram %s already exists", name)
|
||||
if hist, ok := m.histograms[name]; ok {
|
||||
return hist, nil
|
||||
}
|
||||
hist, err := m.meter.SyncFloat64().Histogram(name)
|
||||
if err != nil {
|
||||
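The metrics change above switches RegisterCounter, RegisterUpDownCounter and RegisterHistogram to a get-or-create behaviour: registering the same name twice now returns the cached instrument instead of an error, so several bulks can share one histogram. A small self-contained sketch of that pattern, where the histogram type is only a stand-in for syncfloat64.Histogram:

package main

import "fmt"

type histogram struct{ name string }

type registry struct {
	histograms map[string]*histogram
}

// RegisterHistogram reuses an existing instrument instead of failing on a duplicate name.
func (r *registry) RegisterHistogram(name string) *histogram {
	if h, ok := r.histograms[name]; ok {
		return h
	}
	h := &histogram{name: name}
	r.histograms[name] = h
	return h
}

func main() {
	r := &registry{histograms: map[string]*histogram{}}
	a := r.RegisterHistogram("bulk_insert_duration")
	b := r.RegisterHistogram("bulk_insert_duration")
	fmt.Println(a == b) // true: both callers share one histogram
}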
|
|
|
|||
|
|
@ -12,9 +12,11 @@ from starlette.responses import StreamingResponse, JSONResponse
|
|||
from chalicelib.core import traces
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils import events_queue
|
||||
from routers import core, core_dynamic, ee, saml
|
||||
from routers.crons import core_crons
|
||||
from routers.crons import core_dynamic_crons
|
||||
from routers.crons import ee_crons
|
||||
from routers.subs import insights, metrics, v1_api_ee
|
||||
from routers.subs import v1_api
|
||||
|
||||
|
|
@ -80,9 +82,10 @@ app.queue_system = queue.Queue()
|
|||
async def startup():
|
||||
logging.info(">>>>> starting up <<<<<")
|
||||
await pg_client.init()
|
||||
await events_queue.init()
|
||||
app.schedule.start()
|
||||
|
||||
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs:
|
||||
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
|
||||
app.schedule.add_job(id=job["func"].__name__, **job)
|
||||
|
||||
ap_logger.info(">Scheduled jobs:")
|
||||
|
|
@ -95,6 +98,7 @@ async def shutdown():
|
|||
logging.info(">>>>> shutting down <<<<<")
|
||||
app.schedule.shutdown(wait=True)
|
||||
await traces.process_traces_queue()
|
||||
await events_queue.terminate()
|
||||
await pg_client.terminate()
|
||||
|
||||
|
||||
|
|
|
|||
12
ee/api/chalicelib/core/signals.py
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
import schemas_ee
|
||||
import logging
|
||||
from chalicelib.utils import events_queue
|
||||
|
||||
|
||||
def handle_frontend_signals_queued(project_id: int, user_id: int, data: schemas_ee.SignalsSchema):
|
||||
try:
|
||||
events_queue.global_queue.put((project_id, user_id, data))
|
||||
return {'data': 'insertion succeeded'}
|
||||
except Exception as e:
|
||||
logging.info(f'Error while inserting: {e}')
|
||||
return {'errors': [e]}
|
||||
80
ee/api/chalicelib/utils/events_queue.py
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
import json
|
||||
import queue
|
||||
import logging
|
||||
|
||||
from chalicelib.utils import pg_client
|
||||
|
||||
global_queue = None
|
||||
|
||||
class EventQueue():
|
||||
|
||||
def __init__(self, test=False, queue_max_length=100):
|
||||
self.events = queue.Queue()
|
||||
self.events.maxsize = queue_max_length
|
||||
self.test = test
|
||||
|
||||
def flush(self, conn):
|
||||
events = list()
|
||||
params = dict()
|
||||
# while not self.events.empty():
|
||||
# project_id, user_id, element = self.events.get()
|
||||
# events.append("({project_id}, {user_id}, {timestamp}, '{action}', '{source}', '{category}', '{data}')".format(
|
||||
# project_id=project_id, user_id=user_id, timestamp=element.timestamp, action=element.action, source=element.source, category=element.category, data=json.dumps(element.data)))
|
||||
i = 0
|
||||
while not self.events.empty():
|
||||
project_id, user_id, element = self.events.get()
|
||||
params[f'project_id_{i}'] = project_id
|
||||
params[f'user_id_{i}'] = user_id
|
||||
for _key, _val in element.dict().items():
|
||||
if _key == 'data':
|
||||
params[f'{_key}_{i}'] = json.dumps(_val)
|
||||
else:
|
||||
params[f'{_key}_{i}'] = _val
|
||||
events.append(f"(%(project_id_{i})s, %(user_id_{i})s, %(timestamp_{i})s, %(action_{i})s, %(source_{i})s, %(category_{i})s, %(data_{i})s::jsonb)")
|
||||
i += 1
|
||||
if i == 0:
|
||||
return 0
|
||||
if self.test:
|
||||
print(events)
|
||||
return 1
|
||||
conn.execute(
|
||||
conn.mogrify(f"""INSERT INTO public.frontend_signals (project_id, user_id, timestamp, action, source, category, data)
|
||||
VALUES {' , '.join(events)}""", params)
|
||||
)
|
||||
return 1
|
||||
|
||||
def force_flush(self):
|
||||
if not self.events.empty():
|
||||
try:
|
||||
with pg_client.PostgresClient() as conn:
|
||||
self.flush(conn)
|
||||
except Exception as e:
|
||||
logging.info(f'Error: {e}')
|
||||
|
||||
def put(self, element):
|
||||
if self.events.full():
|
||||
try:
|
||||
with pg_client.PostgresClient() as conn:
|
||||
self.flush(conn)
|
||||
except Exception as e:
|
||||
logging.info(f'Error: {e}')
|
||||
self.events.put(element)
|
||||
self.events.task_done()
|
||||
|
||||
async def init(test=False):
|
||||
global global_queue
|
||||
global_queue = EventQueue(test=test)
|
||||
logging.info("> queue initialized")
|
||||
|
||||
async def terminate():
|
||||
global global_queue
|
||||
if global_queue is not None:
|
||||
global_queue.force_flush()
|
||||
logging.info('> queue flushed')
|
||||
|
||||
# def __process_schema(trace):
|
||||
# data = trace.dict()
|
||||
# data["parameters"] = json.dumps(trace.parameters) if trace.parameters is not None and len(
|
||||
# trace.parameters.keys()) > 0 else None
|
||||
# data["payload"] = json.dumps(trace.payload) if trace.payload is not None and len(trace.payload.keys()) > 0 else None
|
||||
# return data
|
||||
|
|
@ -23,13 +23,14 @@ def unlock_cron() -> None:
|
|||
|
||||
|
||||
cron_jobs = [
|
||||
{"func": unlock_cron, "trigger": "cron", "hour": "*"}
|
||||
{"func": unlock_cron, "trigger": "cron", "hour": "*"},
|
||||
]
|
||||
|
||||
SINGLE_CRONS = [{"func": telemetry_cron, "trigger": "cron", "day_of_week": "*"},
|
||||
{"func": run_scheduled_jobs, "trigger": "interval", "seconds": 60, "misfire_grace_time": 20},
|
||||
{"func": weekly_report, "trigger": "cron", "day_of_week": "mon", "hour": 5,
|
||||
"misfire_grace_time": 60 * 60}]
|
||||
"misfire_grace_time": 60 * 60}
|
||||
]
|
||||
|
||||
if config("LOCAL_CRONS", default=False, cast=bool):
|
||||
cron_jobs += SINGLE_CRONS
|
||||
|
|
|
|||
10
ee/api/routers/crons/ee_crons.py
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
from chalicelib.utils import events_queue
|
||||
|
||||
|
||||
def pg_events_queue() -> None:
|
||||
events_queue.global_queue.force_flush()
|
||||
|
||||
|
||||
ee_cron_jobs = [
|
||||
{"func": pg_events_queue, "trigger": "interval", "seconds": 60*5, "misfire_grace_time": 20},
|
||||
]
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
from typing import Union
|
||||
|
||||
from chalicelib.core import roles, traces, projects, sourcemaps, assist_records, sessions
|
||||
from chalicelib.core import unlock
|
||||
from chalicelib.core import unlock, signals
|
||||
from chalicelib.utils import assist_helper
|
||||
|
||||
unlock.check()
|
||||
|
|
@ -116,3 +116,13 @@ def delete_record(projectId: int, recordId: int, context: schemas_ee.CurrentCont
|
|||
if "errors" in result:
|
||||
return result
|
||||
return {"data": result}
|
||||
|
||||
|
||||
@app.post('/{projectId}/signals', tags=['signals'])
|
||||
def send_interactions(projectId: int, data: schemas_ee.SignalsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = signals.handle_frontend_signals_queued(project_id=projectId, user_id=context.user_id, data=data)
|
||||
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {'data': data}
|
||||
|
|
@@ -31,6 +31,14 @@ class RolePayloadSchema(BaseModel):
        alias_generator = schemas.attribute_to_camel_case


class SignalsSchema(BaseModel):
    timestamp: int = Field(...)
    action: str = Field(...)
    source: str = Field(...)
    category: str = Field(...)
    data: dict = Field(default={})


class CreateMemberSchema(schemas.CreateMemberSchema):
    roleId: Optional[int] = Field(None)
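A hypothetical request body that would validate against `SignalsSchema` above and could be sent to the new `POST /{projectId}/signals` route (field names come from the schema; the values are made up):

```python
payload = {
    "timestamp": 1670000000000,   # int, required
    "action": "click",            # str, required
    "source": "session_player",   # str, required
    "category": "replay",         # str, required
    "data": {"button": "share"},  # dict, optional (defaults to {})
}
# The router validates this into schemas_ee.SignalsSchema and passes it to
# signals.handle_frontend_signals_queued(project_id, user_id, data).
```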
@@ -58,7 +58,7 @@ func (mi *Saver) InsertMessage(msg Message) error {
		return mi.pg.InsertWebJSException(m)
	case *IntegrationEvent:
		return mi.pg.InsertWebIntegrationEvent(m)
-	case *FetchEvent:
+	case *NetworkRequest:
		session, err := mi.pg.GetSession(sessionID)
		if err != nil {
			log.Printf("can't get session info for CH: %s", err)

@@ -72,8 +72,8 @@ func (mi *Saver) InsertMessage(msg Message) error {
				}
			}
		}
-		return mi.pg.InsertWebFetchEvent(sessionID, m)
-	case *GraphQLEvent:
+		return mi.pg.InsertWebNetworkRequest(sessionID, m)
+	case *GraphQL:
		session, err := mi.pg.GetSession(sessionID)
		if err != nil {
			log.Printf("can't get session info for CH: %s", err)

@@ -82,7 +82,7 @@ func (mi *Saver) InsertMessage(msg Message) error {
			log.Printf("can't insert graphQL event into clickhouse: %s", err)
		}
-		return mi.pg.InsertWebGraphQLEvent(sessionID, m)
+		return mi.pg.InsertWebGraphQL(sessionID, m)
	case *SetPageLocation:
		return mi.pg.InsertSessionReferrer(sessionID, m.Referrer)
@@ -27,9 +27,9 @@ type Connector interface {
	InsertWebErrorEvent(session *types.Session, msg *types.ErrorEvent) error
	InsertWebPerformanceTrackAggr(session *types.Session, msg *messages.PerformanceTrackAggr) error
	InsertAutocomplete(session *types.Session, msgType, msgValue string) error
-	InsertRequest(session *types.Session, msg *messages.FetchEvent, savePayload bool) error
+	InsertRequest(session *types.Session, msg *messages.NetworkRequest, savePayload bool) error
	InsertCustom(session *types.Session, msg *messages.CustomEvent) error
-	InsertGraphQL(session *types.Session, msg *messages.GraphQLEvent) error
+	InsertGraphQL(session *types.Session, msg *messages.GraphQL) error
	InsertIssue(session *types.Session, msg *messages.IssueEvent) error
}
@@ -289,7 +289,13 @@ func (c *connectorImpl) InsertWebErrorEvent(session *types.Session, msg *types.E
		keys = append(keys, k)
		values = append(values, v)
	}

+	// Check error source before insert to avoid panic from clickhouse lib
+	switch msg.Source {
+	case "js_exception", "bugsnag", "cloudwatch", "datadog", "elasticsearch", "newrelic", "rollbar", "sentry", "stackdriver", "sumologic":
+	default:
+		return fmt.Errorf("unknown error source: %s", msg.Source)
+	}
	// Insert event to batch
	if err := c.batches["errors"].Append(
		session.SessionID,
		uint16(session.ProjectID),
@@ -352,7 +358,7 @@ func (c *connectorImpl) InsertAutocomplete(session *types.Session, msgType, msgV
	return nil
}

-func (c *connectorImpl) InsertRequest(session *types.Session, msg *messages.FetchEvent, savePayload bool) error {
+func (c *connectorImpl) InsertRequest(session *types.Session, msg *messages.NetworkRequest, savePayload bool) error {
	urlMethod := url.EnsureMethod(msg.Method)
	if urlMethod == "" {
		return fmt.Errorf("can't parse http method. sess: %d, method: %s", session.SessionID, msg.Method)

@@ -365,8 +371,8 @@ func (c *connectorImpl) InsertRequest(session *types.Session, msg *messages.Fetc
	if err := c.batches["requests"].Append(
		session.SessionID,
		uint16(session.ProjectID),
-		msg.MessageID,
-		datetime(msg.Timestamp),
+		msg.Meta().Index,
+		datetime(uint64(msg.Meta().Timestamp)),
		msg.URL,
		request,
		response,
@@ -386,8 +392,8 @@ func (c *connectorImpl) InsertCustom(session *types.Session, msg *messages.Custo
	if err := c.batches["custom"].Append(
		session.SessionID,
		uint16(session.ProjectID),
-		msg.MessageID,
-		datetime(msg.Timestamp),
+		msg.Meta().Index,
+		datetime(uint64(msg.Meta().Timestamp)),
		msg.Name,
		msg.Payload,
		"CUSTOM",
@@ -398,12 +404,12 @@ func (c *connectorImpl) InsertCustom(session *types.Session, msg *messages.Custo
	return nil
}

-func (c *connectorImpl) InsertGraphQL(session *types.Session, msg *messages.GraphQLEvent) error {
+func (c *connectorImpl) InsertGraphQL(session *types.Session, msg *messages.GraphQL) error {
	if err := c.batches["graphql"].Append(
		session.SessionID,
		uint16(session.ProjectID),
-		msg.MessageID,
-		datetime(msg.Timestamp),
+		msg.Meta().Index,
+		datetime(uint64(msg.Meta().Timestamp)),
		msg.OperationName,
		nullableString(msg.Variables),
		nullableString(msg.Response),
@@ -91,7 +91,7 @@ func (s *sessionFinderImpl) worker() {
func (s *sessionFinderImpl) findSession(sessionID, timestamp, partition uint64) {
	sessEnd := &messages.SessionEnd{Timestamp: timestamp}
	sessEnd.SetSessionID(sessionID)
-	err := s.storage.UploadSessionFiles(sessEnd)
+	err := s.storage.Upload(sessEnd)
	if err == nil {
		log.Printf("found session: %d in partition: %d, original: %d",
			sessionID, partition, sessionID%numberOfPartitions)
@@ -120,7 +120,6 @@ func (consumer *Consumer) commitAtTimestamps(
	if err != nil {
		return err
	}
-	logPartitions("Actually assigned:", assigned)

	var timestamps []kafka.TopicPartition
	for _, p := range assigned { // p is a copy here since it is not a pointer

@@ -142,7 +141,6 @@ func (consumer *Consumer) commitAtTimestamps(
	if err != nil {
		return errors.Wrap(err, "Kafka Consumer retrieving committed error")
	}
-	logPartitions("Actually committed:", committed)
	for _, comm := range committed {
		if comm.Offset == kafka.OffsetStored ||
			comm.Offset == kafka.OffsetInvalid ||
@@ -1,15 +0,0 @@
-package kafka
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/confluentinc/confluent-kafka-go/kafka"
-)
-
-func logPartitions(s string, prts []kafka.TopicPartition) {
-	for _, p := range prts {
-		s = fmt.Sprintf("%v | %v", s, p.Partition)
-	}
-	log.Println(s)
-}
@@ -213,6 +213,20 @@ class MouseMove(Message):
        self.y = y


class NetworkRequest(Message):
    __id__ = 21

    def __init__(self, type, method, url, request, response, status, timestamp, duration):
        self.type = type
        self.method = method
        self.url = url
        self.request = request
        self.response = response
        self.status = status
        self.timestamp = timestamp
        self.duration = duration


class ConsoleLog(Message):
    __id__ = 22
@@ -265,7 +279,7 @@ class IntegrationEvent(Message):
        self.payload = payload


-class RawCustomEvent(Message):
+class CustomEvent(Message):
    __id__ = 27

    def __init__(self, name, payload):
@@ -358,16 +372,6 @@ class ResourceEvent(Message):
        self.status = status


-class CustomEvent(Message):
-    __id__ = 36
-
-    def __init__(self, message_id, timestamp, name, payload):
-        self.message_id = message_id
-        self.timestamp = timestamp
-        self.name = name
-        self.payload = payload
-
-
class CSSInsertRule(Message):
    __id__ = 37
@@ -423,15 +427,6 @@ class StateAction(Message):
        self.type = type


-class StateActionEvent(Message):
-    __id__ = 43
-
-    def __init__(self, message_id, timestamp, type):
-        self.message_id = message_id
-        self.timestamp = timestamp
-        self.type = type
-
-
class Redux(Message):
    __id__ = 44
@@ -486,32 +481,6 @@ class PerformanceTrack(Message):
        self.used_js_heap_size = used_js_heap_size


-class GraphQLEvent(Message):
-    __id__ = 50
-
-    def __init__(self, message_id, timestamp, operation_kind, operation_name, variables, response):
-        self.message_id = message_id
-        self.timestamp = timestamp
-        self.operation_kind = operation_kind
-        self.operation_name = operation_name
-        self.variables = variables
-        self.response = response
-
-
-class FetchEvent(Message):
-    __id__ = 51
-
-    def __init__(self, message_id, timestamp, method, url, request, response, status, duration):
-        self.message_id = message_id
-        self.timestamp = timestamp
-        self.method = method
-        self.url = url
-        self.request = request
-        self.response = response
-        self.status = status
-        self.duration = duration
-
-
class DOMDrop(Message):
    __id__ = 52
@@ -237,6 +237,18 @@ class MessageCodec(Codec):
                y=self.read_uint(reader)
            )

        if message_id == 21:
            return NetworkRequest(
                type=self.read_string(reader),
                method=self.read_string(reader),
                url=self.read_string(reader),
                request=self.read_string(reader),
                response=self.read_string(reader),
                status=self.read_uint(reader),
                timestamp=self.read_uint(reader),
                duration=self.read_uint(reader)
            )

        if message_id == 22:
            return ConsoleLog(
                level=self.read_string(reader),
@@ -280,7 +292,7 @@ class MessageCodec(Codec):
            )

        if message_id == 27:
-            return RawCustomEvent(
+            return CustomEvent(
                name=self.read_string(reader),
                payload=self.read_string(reader)
            )
@@ -356,14 +368,6 @@ class MessageCodec(Codec):
                status=self.read_uint(reader)
            )

-        if message_id == 36:
-            return CustomEvent(
-                message_id=self.read_uint(reader),
-                timestamp=self.read_uint(reader),
-                name=self.read_string(reader),
-                payload=self.read_string(reader)
-            )
-
        if message_id == 37:
            return CSSInsertRule(
                id=self.read_uint(reader),
@@ -407,13 +411,6 @@ class MessageCodec(Codec):
                type=self.read_string(reader)
            )

-        if message_id == 43:
-            return StateActionEvent(
-                message_id=self.read_uint(reader),
-                timestamp=self.read_uint(reader),
-                type=self.read_string(reader)
-            )
-
        if message_id == 44:
            return Redux(
                action=self.read_string(reader),
@@ -456,28 +453,6 @@ class MessageCodec(Codec):
                used_js_heap_size=self.read_uint(reader)
            )

-        if message_id == 50:
-            return GraphQLEvent(
-                message_id=self.read_uint(reader),
-                timestamp=self.read_uint(reader),
-                operation_kind=self.read_string(reader),
-                operation_name=self.read_string(reader),
-                variables=self.read_string(reader),
-                response=self.read_string(reader)
-            )
-
-        if message_id == 51:
-            return FetchEvent(
-                message_id=self.read_uint(reader),
-                timestamp=self.read_uint(reader),
-                method=self.read_string(reader),
-                url=self.read_string(reader),
-                request=self.read_string(reader),
-                response=self.read_string(reader),
-                status=self.read_uint(reader),
-                duration=self.read_uint(reader)
-            )
-
        if message_id == 52:
            return DOMDrop(
                timestamp=self.read_uint(reader)
14
ee/recommendation/Dockerfile
Normal file
@@ -0,0 +1,14 @@
FROM apache/airflow:2.4.3
COPY requirements.txt .

USER root
RUN apt-get update \
    && apt-get install -y \
       vim \
    && apt-get install gcc libc-dev g++ -y \
    && apt-get install -y pkg-config libxml2-dev libxmlsec1-dev libxmlsec1-openssl


USER airflow
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
1
ee/recommendation/clean.sh
Normal file
@@ -0,0 +1 @@
docker-compose down --volumes --rmi all
46
ee/recommendation/dags/training_dag.py
Normal file
@@ -0,0 +1,46 @@
from datetime import datetime, timedelta
from textwrap import dedent

import pendulum

from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
import os

_work_dir = os.getcwd()


def my_function():
    l = os.listdir('scripts')
    print(l)
    return l


dag = DAG(
    "first_test",
    default_args={
        "depends_on_past": True,
        "retries": 1,
        "retry_delay": timedelta(minutes=3),
    },
    start_date=pendulum.datetime(2015, 12, 1, tz="UTC"),
    description="My first test",
    schedule="@daily",
    catchup=False,
)


# assigning the task for our dag to do
with dag:
    first_world = PythonOperator(
        task_id='FirstTest',
        python_callable=my_function,
    )
    hello_world = BashOperator(
        task_id='OneTest',
        bash_command=f'python {_work_dir}/scripts/processing.py --batch_size 500',
        # provide_context=True
    )
    this_world = BashOperator(
        task_id='ThisTest',
        bash_command=f'python {_work_dir}/scripts/task.py --mode train --kernel linear',
    )
    first_world >> hello_world >> this_world
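A small, purely illustrative sanity check for the DAG above (assumes Airflow 2.4.x is installed and that the file is importable as `dags.training_dag`; not part of the commit):

```python
from dags.training_dag import dag

# Tasks registered on the DAG and the declared ordering
print(dag.task_ids)                                # ['FirstTest', 'OneTest', 'ThisTest']
print(dag.get_task('ThisTest').upstream_task_ids)  # {'OneTest'}
```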
285
ee/recommendation/docker-compose.yaml
Normal file
|
|
@ -0,0 +1,285 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL.
|
||||
#
|
||||
# WARNING: This configuration is for local development. Do not use it in a production deployment.
|
||||
#
|
||||
# This configuration supports basic configuration using environment variables or an .env file
|
||||
# The following variables are supported:
|
||||
#
|
||||
# AIRFLOW_IMAGE_NAME - Docker image name used to run Airflow.
|
||||
# Default: apache/airflow:2.4.3
|
||||
# AIRFLOW_UID - User ID in Airflow containers
|
||||
# Default: 50000
|
||||
# Those configurations are useful mostly in case of standalone testing/running Airflow in test/try-out mode
|
||||
#
|
||||
# _AIRFLOW_WWW_USER_USERNAME - Username for the administrator account (if requested).
|
||||
# Default: airflow
|
||||
# _AIRFLOW_WWW_USER_PASSWORD - Password for the administrator account (if requested).
|
||||
# Default: airflow
|
||||
# _PIP_ADDITIONAL_REQUIREMENTS - Additional PIP requirements to add when starting all containers.
|
||||
# Default: ''
|
||||
#
|
||||
# Feel free to modify this file to suit your needs.
|
||||
---
|
||||
version: '3'
|
||||
x-airflow-common:
|
||||
&airflow-common
|
||||
# In order to add custom dependencies or upgrade provider packages you can use your extended image.
|
||||
# Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml
|
||||
# and uncomment the "build" line below, Then run `docker-compose build` to build the images.
|
||||
# image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.4.3}
|
||||
build: .
|
||||
environment:
|
||||
&airflow-common-env
|
||||
AIRFLOW__CORE__EXECUTOR: CeleryExecutor
|
||||
AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow
|
||||
# For backward compatibility, with Airflow <2.3
|
||||
AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow
|
||||
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow
|
||||
AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0
|
||||
AIRFLOW__CORE__FERNET_KEY: ''
|
||||
AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true'
|
||||
AIRFLOW__CORE__LOAD_EXAMPLES: 'false'
|
||||
AIRFLOW__API__AUTH_BACKENDS: 'airflow.api.auth.backend.basic_auth'
|
||||
_PIP_ADDITIONAL_REQUIREMENTS: 'argcomplete'
|
||||
AIRFLOW__CODE_EDITOR__ENABLED: 'true'
|
||||
AIRFLOW__CODE_EDITOR__GIT_ENABLED: 'false'
|
||||
AIRFLOW__CODE_EDITOR__STRING_NORMALIZATION: 'true'
|
||||
AIRFLOW__CODE_EDITOR__MOUNT: '/opt/airflow/dags'
|
||||
pg_user: "${pg_user}"
|
||||
pg_password: "${pg_password}"
|
||||
pg_dbname: "${pg_dbname}"
|
||||
pg_host: "${pg_host}"
|
||||
pg_port: "${pg_port}"
|
||||
PG_TIMEOUT: "${PG_TIMEOUT}"
|
||||
PG_POOL: "${PG_POOL}"
|
||||
volumes:
|
||||
- ./dags:/opt/airflow/dags
|
||||
- ./logs:/opt/airflow/logs
|
||||
- ./plugins:/opt/airflow/plugins
|
||||
- ./scripts:/opt/airflow/scripts
|
||||
- ./cache:/opt/airflow/cache
|
||||
user: "${AIRFLOW_UID:-50000}:0"
|
||||
depends_on:
|
||||
&airflow-common-depends-on
|
||||
redis:
|
||||
condition: service_healthy
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:13
|
||||
environment:
|
||||
POSTGRES_USER: airflow
|
||||
POSTGRES_PASSWORD: airflow
|
||||
POSTGRES_DB: airflow
|
||||
volumes:
|
||||
- postgres-db-volume:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready", "-U", "airflow"]
|
||||
interval: 5s
|
||||
retries: 5
|
||||
restart: always
|
||||
|
||||
redis:
|
||||
image: redis:latest
|
||||
expose:
|
||||
- 6379
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 5s
|
||||
timeout: 30s
|
||||
retries: 50
|
||||
restart: always
|
||||
|
||||
airflow-webserver:
|
||||
<<: *airflow-common
|
||||
command: webserver
|
||||
ports:
|
||||
- 8080:8080
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "--fail", "http://localhost:8080/health"]
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
restart: always
|
||||
depends_on:
|
||||
<<: *airflow-common-depends-on
|
||||
airflow-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
airflow-scheduler:
|
||||
<<: *airflow-common
|
||||
command: scheduler
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"']
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
restart: always
|
||||
depends_on:
|
||||
<<: *airflow-common-depends-on
|
||||
airflow-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
airflow-worker:
|
||||
<<: *airflow-common
|
||||
command: celery worker
|
||||
healthcheck:
|
||||
test:
|
||||
- "CMD-SHELL"
|
||||
- 'celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"'
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
environment:
|
||||
<<: *airflow-common-env
|
||||
# Required to handle warm shutdown of the celery workers properly
|
||||
# See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation
|
||||
DUMB_INIT_SETSID: "0"
|
||||
restart: always
|
||||
depends_on:
|
||||
<<: *airflow-common-depends-on
|
||||
airflow-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
airflow-triggerer:
|
||||
<<: *airflow-common
|
||||
command: triggerer
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", 'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"']
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
restart: always
|
||||
depends_on:
|
||||
<<: *airflow-common-depends-on
|
||||
airflow-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
airflow-init:
|
||||
<<: *airflow-common
|
||||
entrypoint: /bin/bash
|
||||
# yamllint disable rule:line-length
|
||||
command:
|
||||
- -c
|
||||
- |
|
||||
function ver() {
|
||||
printf "%04d%04d%04d%04d" $${1//./ }
|
||||
}
|
||||
register-python-argcomplete airflow >> ~/.bashrc
|
||||
airflow_version=$$(AIRFLOW__LOGGING__LOGGING_LEVEL=INFO && gosu airflow airflow version)
|
||||
airflow_version_comparable=$$(ver $${airflow_version})
|
||||
min_airflow_version=2.2.0
|
||||
min_airflow_version_comparable=$$(ver $${min_airflow_version})
|
||||
if [[ -z "${AIRFLOW_UID}" ]]; then
|
||||
echo
|
||||
echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m"
|
||||
echo "If you are on Linux, you SHOULD follow the instructions below to set "
|
||||
echo "AIRFLOW_UID environment variable, otherwise files will be owned by root."
|
||||
echo "For other operating systems you can get rid of the warning with manually created .env file:"
|
||||
echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user"
|
||||
echo
|
||||
fi
|
||||
one_meg=1048576
|
||||
mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg))
|
||||
cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat)
|
||||
disk_available=$$(df / | tail -1 | awk '{print $$4}')
|
||||
warning_resources="false"
|
||||
if (( mem_available < 4000 )) ; then
|
||||
echo
|
||||
echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m"
|
||||
echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))"
|
||||
echo
|
||||
warning_resources="true"
|
||||
fi
|
||||
if (( cpus_available < 2 )); then
|
||||
echo
|
||||
echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m"
|
||||
echo "At least 2 CPUs recommended. You have $${cpus_available}"
|
||||
echo
|
||||
warning_resources="true"
|
||||
fi
|
||||
if (( disk_available < one_meg * 10 )); then
|
||||
echo
|
||||
echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m"
|
||||
echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))"
|
||||
echo
|
||||
warning_resources="true"
|
||||
fi
|
||||
if [[ $${warning_resources} == "true" ]]; then
|
||||
echo
|
||||
echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m"
|
||||
echo "Please follow the instructions to increase amount of resources available:"
|
||||
echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin"
|
||||
echo
|
||||
fi
|
||||
mkdir -p /sources/logs /sources/dags /sources/plugins
|
||||
chown -R "${AIRFLOW_UID}:0" /sources/{logs,dags,plugins}
|
||||
exec /entrypoint airflow version
|
||||
# yamllint enable rule:line-length
|
||||
environment:
|
||||
<<: *airflow-common-env
|
||||
_AIRFLOW_DB_UPGRADE: 'true'
|
||||
_AIRFLOW_WWW_USER_CREATE: 'true'
|
||||
_AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow}
|
||||
_AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow}
|
||||
_PIP_ADDITIONAL_REQUIREMENTS: ''
|
||||
user: "0:0"
|
||||
volumes:
|
||||
- .:/sources
|
||||
|
||||
airflow-cli:
|
||||
<<: *airflow-common
|
||||
profiles:
|
||||
- debug
|
||||
environment:
|
||||
<<: *airflow-common-env
|
||||
CONNECTION_CHECK_MAX_COUNT: "0"
|
||||
# Workaround for entrypoint issue. See: https://github.com/apache/airflow/issues/16252
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- airflow
|
||||
|
||||
# You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
|
||||
# or by explicitly targeted on the command line e.g. docker-compose up flower.
|
||||
# See: https://docs.docker.com/compose/profiles/
|
||||
flower:
|
||||
<<: *airflow-common
|
||||
command: celery flower
|
||||
profiles:
|
||||
- flower
|
||||
ports:
|
||||
- 5555:5555
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "--fail", "http://localhost:5555/"]
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
restart: always
|
||||
depends_on:
|
||||
<<: *airflow-common-depends-on
|
||||
airflow-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
volumes:
|
||||
postgres-db-volume:
|
||||
22
ee/recommendation/requirements.txt
Normal file
@@ -0,0 +1,22 @@
requests==2.28.1
urllib3==1.26.12
pyjwt==2.5.0
psycopg2-binary==2.9.3

numpy
threadpoolctl==3.1.0
joblib==1.2.0
scipy
scikit-learn
mlflow

airflow-code-editor

pydantic[email]==1.10.2

clickhouse-driver==0.2.4
python3-saml==1.14.0
python-multipart==0.0.5
python-decouple

argcomplete
11
ee/recommendation/run.sh
Normal file
@@ -0,0 +1,11 @@
echo 'Setting up required modules..'
mkdir scripts
mkdir plugins
mkdir logs
mkdir scripts/utils
cp ../../api/chalicelib/utils/pg_client.py scripts/utils
cp ../api/chalicelib/utils/ch_client.py scripts/utils
echo 'Building containers...'
docker-compose up airflow-init
echo 'Running containers...'
docker-compose up
161
ee/recommendation/scripts/core/features.py
Normal file
|
|
@ -0,0 +1,161 @@
|
|||
from utils.ch_client import ClickHouseClient
|
||||
from utils.pg_client import PostgresClient
|
||||
|
||||
def get_features_clickhouse(**kwargs):
|
||||
"""Gets features from ClickHouse database"""
|
||||
if 'limit' in kwargs:
|
||||
limit = kwargs['limit']
|
||||
else:
|
||||
limit = 500
|
||||
query = f"""SELECT session_id, project_id, user_id, events_count, errors_count, duration, country, issue_score, device_type, rage, jsexception, badrequest FROM (
|
||||
SELECT session_id, project_id, user_id, events_count, errors_count, duration, toInt8(user_country) as country, issue_score, toInt8(user_device_type) as device_type FROM experimental.sessions WHERE user_id IS NOT NULL) as T1
|
||||
INNER JOIN (SELECT session_id, project_id, sum(issue_type = 'click_rage') as rage, sum(issue_type = 'js_exception') as jsexception, sum(issue_type = 'bad_request') as badrequest FROM experimental.events WHERE event_type = 'ISSUE' AND session_id > 0 GROUP BY session_id, project_id LIMIT {limit}) as T2
|
||||
ON T1.session_id = T2.session_id AND T1.project_id = T2.project_id;"""
|
||||
with ClickHouseClient() as conn:
|
||||
res = conn.execute(query)
|
||||
return res
|
||||
|
||||
|
||||
def get_features_postgres(**kwargs):
|
||||
with PostgresClient() as conn:
|
||||
funnels = query_funnels(conn, **kwargs)
|
||||
metrics = query_metrics(conn, **kwargs)
|
||||
filters = query_with_filters(conn, **kwargs)
|
||||
#clean_filters(funnels)
|
||||
#clean_filters(filters)
|
||||
return clean_filters_split(funnels, isfunnel=True), metrics, clean_filters_split(filters)
|
||||
|
||||
|
||||
|
||||
def query_funnels(conn, **kwargs):
|
||||
"""Gets Funnels (PG database)"""
|
||||
# If public.funnel is empty
|
||||
funnels_query = f"""SELECT project_id, user_id, filter FROM (SELECT project_id, user_id, metric_id FROM public.metrics WHERE metric_type='funnel'
|
||||
) as T1 LEFT JOIN (SELECT filter, metric_id FROM public.metric_series) as T2 ON T1.metric_id = T2.metric_id"""
|
||||
# Else
|
||||
# funnels_query = "SELECT project_id, user_id, filter FROM public.funnels"
|
||||
|
||||
conn.execute(funnels_query)
|
||||
res = conn.fetchall()
|
||||
return res
|
||||
|
||||
|
||||
def query_metrics(conn, **kwargs):
|
||||
"""Gets Metrics (PG_database)"""
|
||||
metrics_query = """SELECT metric_type, metric_of, metric_value, metric_format FROM public.metrics"""
|
||||
conn.execute(metrics_query)
|
||||
res = conn.fetchall()
|
||||
return res
|
||||
|
||||
|
||||
def query_with_filters(conn, **kwargs):
|
||||
"""Gets Metrics with filters (PG database)"""
|
||||
filters_query = """SELECT T1.metric_id as metric_id, project_id, name, metric_type, metric_of, filter FROM (
|
||||
SELECT metric_id, project_id, name, metric_type, metric_of FROM metrics) as T1 INNER JOIN
|
||||
(SELECT metric_id, filter FROM metric_series WHERE filter != '{}') as T2 ON T1.metric_id = T2.metric_id"""
|
||||
conn.execute(filters_query)
|
||||
res = conn.fetchall()
|
||||
return res
|
||||
|
||||
|
||||
def transform_funnel(project_id, user_id, data):
|
||||
res = list()
|
||||
for k in range(len(data)):
|
||||
_tmp = data[k]
|
||||
if _tmp['project_id'] != project_id or _tmp['user_id'] != user_id:
|
||||
continue
|
||||
else:
|
||||
_tmp = _tmp['filter']['events']
|
||||
res.append(_tmp)
|
||||
return res
|
||||
|
||||
|
||||
def transform_with_filter(data, *kwargs):
|
||||
res = list()
|
||||
for k in range(len(data)):
|
||||
_tmp = data[k]
|
||||
jump = False
|
||||
for _key in kwargs.keys():
|
||||
if data[_key] != kwargs[_key]:
|
||||
jump = True
|
||||
break
|
||||
if jump:
|
||||
continue
|
||||
_type = data['metric_type']
|
||||
if _type == 'funnel':
|
||||
res.append(['funnel', _tmp['filter']['events']])
|
||||
elif _type == 'timeseries':
|
||||
res.append(['timeseries', _tmp['filter']['filters'], _tmp['filter']['events']])
|
||||
elif _type == 'table':
|
||||
res.append(['table', _tmp['metric_of'], _tmp['filter']['events']])
|
||||
return res
|
||||
|
||||
|
||||
def transform(element):
|
||||
key_ = element.pop('user_id')
|
||||
secondary_key_ = element.pop('session_id')
|
||||
context_ = element.pop('project_id')
|
||||
features_ = element
|
||||
del element
|
||||
return {(key_, context_): {secondary_key_: list(features_.values())}}
|
||||
|
||||
|
||||
def get_by_project(data, project_id):
|
||||
head_ = [list(d.keys())[0][1] for d in data]
|
||||
index_ = [k for k in range(len(head_)) if head_[k] == project_id]
|
||||
return [data[k] for k in index_]
|
||||
|
||||
|
||||
def get_by_user(data, user_id):
|
||||
head_ = [list(d.keys())[0][0] for d in data]
|
||||
index_ = [k for k in range(len(head_)) if head_[k] == user_id]
|
||||
return [data[k] for k in index_]
|
||||
|
||||
|
||||
def clean_filters(data):
|
||||
for j in range(len(data)):
|
||||
_filter = data[j]['filter']
|
||||
_tmp = list()
|
||||
for i in range(len(_filter['filters'])):
|
||||
if 'value' in _filter['filters'][i].keys():
|
||||
_tmp.append({'type': _filter['filters'][i]['type'],
|
||||
'value': _filter['filters'][i]['value'],
|
||||
'operator': _filter['filters'][i]['operator']})
|
||||
data[j]['filter'] = _tmp
|
||||
|
||||
|
||||
def clean_filters_split(data, isfunnel=False):
|
||||
_data = list()
|
||||
for j in range(len(data)):
|
||||
_filter = data[j]['filter']
|
||||
_tmp = list()
|
||||
for i in range(len(_filter['filters'])):
|
||||
if 'value' in _filter['filters'][i].keys():
|
||||
_type = _filter['filters'][i]['type']
|
||||
_value = _filter['filters'][i]['value']
|
||||
if isinstance(_value, str):
|
||||
_value = [_value]
|
||||
_operator = _filter['filters'][i]['operator']
|
||||
if isfunnel:
|
||||
_data.append({'project_id': data[j]['project_id'], 'user_id': data[j]['user_id'],
|
||||
'type': _type,
|
||||
'value': _value,
|
||||
'operator': _operator
|
||||
})
|
||||
else:
|
||||
_data.append({'metric_id': data[j]['metric_id'], 'project_id': data[j]['project_id'],
|
||||
'name': data[j]['name'], 'metric_type': data[j]['metric_type'],
|
||||
'metric_of': data[j]['metric_of'],
|
||||
'type': _type,
|
||||
'value': _value,
|
||||
'operator': _operator
|
||||
})
|
||||
return _data
|
||||
|
||||
def test():
|
||||
print('One test')
|
||||
|
||||
if __name__ == '__main__':
|
||||
print('Just a test')
|
||||
#data = get_features_clickhouse()
|
||||
#print('Data length:', len(data))
|
||||
15
ee/recommendation/scripts/core/recommendation_model.py
Normal file
@@ -0,0 +1,15 @@
from sklearn.svm import SVC


class SVM_recommendation():
    def __init__(self, **params):
        f"""{SVC.__doc__}"""
        self.svm = SVC(**params)

    def fit(self, X1=None, X2=None):
        assert X1 is not None or X2 is not None, 'X1 or X2 must be given'
        self.svm.fit(X1)
        self.svm.fit(X2)

    def predict(self, X):
        return self.svm.predict(X)
60
ee/recommendation/scripts/model_registry.py
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
import mlflow
|
||||
##
|
||||
import numpy as np
|
||||
import pickle
|
||||
|
||||
from sklearn import datasets, linear_model
|
||||
from sklearn.metrics import mean_squared_error, r2_score
|
||||
|
||||
# source: https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html
|
||||
|
||||
# Load the diabetes dataset
|
||||
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
|
||||
|
||||
# Use only one feature
|
||||
diabetes_X = diabetes_X[:, np.newaxis, 2]
|
||||
|
||||
# Split the data into training/testing sets
|
||||
diabetes_X_train = diabetes_X[:-20]
|
||||
diabetes_X_test = diabetes_X[-20:]
|
||||
|
||||
# Split the targets into training/testing sets
|
||||
diabetes_y_train = diabetes_y[:-20]
|
||||
diabetes_y_test = diabetes_y[-20:]
|
||||
|
||||
|
||||
def print_predictions(m, y_pred):
|
||||
|
||||
# The coefficients
|
||||
print('Coefficients: \n', m.coef_)
|
||||
# The mean squared error
|
||||
print('Mean squared error: %.2f'
|
||||
% mean_squared_error(diabetes_y_test, y_pred))
|
||||
# The coefficient of determination: 1 is perfect prediction
|
||||
print('Coefficient of determination: %.2f'
|
||||
% r2_score(diabetes_y_test, y_pred))
|
||||
|
||||
# Create linear regression object
|
||||
lr_model = linear_model.LinearRegression()
|
||||
|
||||
# Train the model using the training sets
|
||||
lr_model.fit(diabetes_X_train, diabetes_y_train)
|
||||
|
||||
# Make predictions using the testing set
|
||||
diabetes_y_pred = lr_model.predict(diabetes_X_test)
|
||||
print_predictions(lr_model, diabetes_y_pred)
|
||||
|
||||
# save the model in the native sklearn format
|
||||
filename = 'lr_model.pkl'
|
||||
pickle.dump(lr_model, open(filename, 'wb'))
|
||||
##
|
||||
# load the model into memory
|
||||
loaded_model = pickle.load(open(filename, 'rb'))
|
||||
|
||||
# log and register the model using MLflow scikit-learn API
|
||||
mlflow.set_tracking_uri("postgresql+psycopg2://airflow:airflow@postgres/mlruns")
|
||||
reg_model_name = "SklearnLinearRegression"
|
||||
print("--")
|
||||
mlflow.sklearn.log_model(loaded_model, "sk_learn",
|
||||
serialization_format="cloudpickle",
|
||||
registered_model_name=reg_model_name)
|
||||
42
ee/recommendation/scripts/processing.py
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
import time
|
||||
import argparse
|
||||
from core import features
|
||||
from utils import pg_client
|
||||
import multiprocessing as mp
|
||||
from decouple import config
|
||||
import asyncio
|
||||
import pandas
|
||||
|
||||
|
||||
def features_ch(q):
|
||||
q.put(features.get_features_clickhouse())
|
||||
|
||||
def features_pg(q):
|
||||
q.put(features.get_features_postgres())
|
||||
|
||||
def get_features():
|
||||
#mp.set_start_method('spawn')
|
||||
#q = mp.Queue()
|
||||
#p1 = mp.Process(target=features_ch, args=(q,))
|
||||
#p1.start()
|
||||
pg_features = features.get_features_postgres()
|
||||
ch_features = []#p1.join()
|
||||
return [pg_features, ch_features]
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Gets and process data from Postgres and ClickHouse.')
|
||||
parser.add_argument('--batch_size', type=int, required=True, help='--batch_size max size of columns per file to be saved in opt/airflow/cache')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if __name__ == '__main__':
|
||||
asyncio.run(pg_client.init())
|
||||
print(args)
|
||||
t1 = time.time()
|
||||
data = get_features()
|
||||
#print(data)
|
||||
cache_dir = config("data_dir", default=f"/opt/airflow/cache")
|
||||
for d in data[0]:
|
||||
pandas.DataFrame(d).to_csv(f'{cache_dir}/tmp-{hash(time.time())}', sep=',')
|
||||
t2 = time.time()
|
||||
print(f'DONE! information retrieved in {t2-t1: .2f} seconds')
|
||||
41
ee/recommendation/scripts/task.py
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
import time
|
||||
import argparse
|
||||
from decouple import config
|
||||
from core import recommendation_model
|
||||
|
||||
import pandas
|
||||
import json
|
||||
import os
|
||||
|
||||
|
||||
def transform_dict_string(s_dicts):
|
||||
data = list()
|
||||
for s_dict in s_dicts:
|
||||
data.append(json.loads(s_dict.replace("'", '"').replace('None','null').replace('False','false')))
|
||||
return data
|
||||
|
||||
def process_file(file_name):
|
||||
return pandas.read_csv(file_name, sep=",")
|
||||
|
||||
|
||||
def read_batches():
|
||||
base_dir = config('dir_path', default='/opt/airflow/cache')
|
||||
files = os.listdir(base_dir)
|
||||
for file in files:
|
||||
yield process_file(f'{base_dir}/{file}')
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Handle machine learning inputs.')
|
||||
parser.add_argument('--mode', choices=['train', 'test'], required=True, help='--mode sets the model in train or test mode')
|
||||
parser.add_argument('--kernel', default='linear', help='--kernel set the kernel to be used for SVM')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if __name__ == '__main__':
|
||||
print(args)
|
||||
t1 = time.time()
|
||||
buff = read_batches()
|
||||
for b in buff:
|
||||
print(b.head())
|
||||
t2 = time.time()
|
||||
print(f'DONE! information retrieved in {t2-t1: .2f} seconds')
|
||||
54
ee/recommendation/scripts/utils/ch_client.py
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
import logging
|
||||
|
||||
import clickhouse_driver
|
||||
from decouple import config
|
||||
|
||||
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
|
||||
|
||||
settings = {}
|
||||
if config('ch_timeout', cast=int, default=-1) > 0:
|
||||
logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
|
||||
settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}
|
||||
|
||||
if config('ch_receive_timeout', cast=int, default=-1) > 0:
|
||||
logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
|
||||
settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}
|
||||
|
||||
|
||||
class ClickHouseClient:
|
||||
__client = None
|
||||
|
||||
def __init__(self):
|
||||
self.__client = clickhouse_driver.Client(host=config("ch_host"),
|
||||
database="default",
|
||||
port=config("ch_port", cast=int),
|
||||
settings=settings) \
|
||||
if self.__client is None else self.__client
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def execute(self, query, params=None, **args):
|
||||
try:
|
||||
results = self.__client.execute(query=query, params=params, with_column_types=True, **args)
|
||||
keys = tuple(x for x, y in results[1])
|
||||
return [dict(zip(keys, i)) for i in results[0]]
|
||||
except Exception as err:
|
||||
logging.error("--------- CH QUERY EXCEPTION -----------")
|
||||
logging.error(self.format(query=query, params=params))
|
||||
logging.error("--------------------")
|
||||
raise err
|
||||
|
||||
def insert(self, query, params=None, **args):
|
||||
return self.__client.execute(query=query, params=params, **args)
|
||||
|
||||
def client(self):
|
||||
return self.__client
|
||||
|
||||
def format(self, query, params):
|
||||
if params is None:
|
||||
return query
|
||||
return self.__client.substitute_params(query, params, self.__client.connection.context)
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
166
ee/recommendation/scripts/utils/pg_client.py
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
import logging
|
||||
import time
|
||||
from threading import Semaphore
|
||||
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
from decouple import config
|
||||
from psycopg2 import pool
|
||||
|
||||
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
|
||||
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
|
||||
|
||||
_PG_CONFIG = {"host": config("pg_host"),
|
||||
"database": config("pg_dbname"),
|
||||
"user": config("pg_user"),
|
||||
"password": config("pg_password"),
|
||||
"port": config("pg_port", cast=int),
|
||||
"application_name": config("APP_NAME", default="PY")}
|
||||
PG_CONFIG = dict(_PG_CONFIG)
|
||||
if config("PG_TIMEOUT", cast=int, default=0) > 0:
|
||||
PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"
|
||||
|
||||
|
||||
class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
|
||||
def __init__(self, minconn, maxconn, *args, **kwargs):
|
||||
self._semaphore = Semaphore(maxconn)
|
||||
super().__init__(minconn, maxconn, *args, **kwargs)
|
||||
|
||||
def getconn(self, *args, **kwargs):
|
||||
self._semaphore.acquire()
|
||||
try:
|
||||
return super().getconn(*args, **kwargs)
|
||||
except psycopg2.pool.PoolError as e:
|
||||
if str(e) == "connection pool is closed":
|
||||
make_pool()
|
||||
raise e
|
||||
|
||||
def putconn(self, *args, **kwargs):
|
||||
try:
|
||||
super().putconn(*args, **kwargs)
|
||||
self._semaphore.release()
|
||||
except psycopg2.pool.PoolError as e:
|
||||
if str(e) == "trying to put unkeyed connection":
|
||||
print("!!! trying to put unkeyed connection")
|
||||
print(f"env-PG_POOL:{config('PG_POOL', default=None)}")
|
||||
return
|
||||
raise e
|
||||
|
||||
|
||||
postgreSQL_pool: ORThreadedConnectionPool = None
|
||||
|
||||
RETRY_MAX = config("PG_RETRY_MAX", cast=int, default=50)
|
||||
RETRY_INTERVAL = config("PG_RETRY_INTERVAL", cast=int, default=2)
|
||||
RETRY = 0
|
||||
|
||||
|
||||
def make_pool():
|
||||
if not config('PG_POOL', cast=bool, default=True):
|
||||
return
|
||||
global postgreSQL_pool
|
||||
global RETRY
|
||||
if postgreSQL_pool is not None:
|
||||
try:
|
||||
postgreSQL_pool.closeall()
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logging.error("Error while closing all connexions to PostgreSQL", error)
|
||||
try:
|
||||
postgreSQL_pool = ORThreadedConnectionPool(config("PG_MINCONN", cast=int, default=20),
|
||||
config("PG_MAXCONN", cast=int, default=80),
|
||||
**PG_CONFIG)
|
||||
if (postgreSQL_pool):
|
||||
logging.info("Connection pool created successfully")
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logging.error("Error while connecting to PostgreSQL", error)
|
||||
if RETRY < RETRY_MAX:
|
||||
RETRY += 1
|
||||
logging.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
|
||||
time.sleep(RETRY_INTERVAL)
|
||||
make_pool()
|
||||
else:
|
||||
raise error
|
||||
|
||||
|
||||
class PostgresClient:
|
||||
connection = None
|
||||
cursor = None
|
||||
long_query = False
|
||||
unlimited_query = False
|
||||
|
||||
def __init__(self, long_query=False, unlimited_query=False):
|
||||
self.long_query = long_query
|
||||
self.unlimited_query = unlimited_query
|
||||
if unlimited_query:
|
||||
long_config = dict(_PG_CONFIG)
|
||||
long_config["application_name"] += "-UNLIMITED"
|
||||
self.connection = psycopg2.connect(**long_config)
|
||||
elif long_query:
|
||||
long_config = dict(_PG_CONFIG)
|
||||
long_config["application_name"] += "-LONG"
|
||||
long_config["options"] = f"-c statement_timeout=" \
|
||||
f"{config('pg_long_timeout', cast=int, default=5 * 60) * 1000}"
|
||||
self.connection = psycopg2.connect(**long_config)
|
||||
elif not config('PG_POOL', cast=bool, default=True):
|
||||
single_config = dict(_PG_CONFIG)
|
||||
single_config["application_name"] += "-NOPOOL"
|
||||
single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
|
||||
self.connection = psycopg2.connect(**single_config)
|
||||
else:
|
||||
self.connection = postgreSQL_pool.getconn()
|
||||
|
||||
def __enter__(self):
|
||||
if self.cursor is None:
|
||||
self.cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
|
||||
self.cursor.recreate = self.recreate_cursor
|
||||
return self.cursor
|
||||
|
||||
def __exit__(self, *args):
|
||||
try:
|
||||
self.connection.commit()
|
||||
self.cursor.close()
|
||||
if self.long_query or self.unlimited_query:
|
||||
self.connection.close()
|
||||
except Exception as error:
|
||||
logging.error("Error while committing/closing PG-connection", error)
|
||||
if str(error) == "connection already closed" \
|
||||
and not self.long_query \
|
||||
and not self.unlimited_query \
|
||||
and config('PG_POOL', cast=bool, default=True):
|
||||
logging.info("Recreating the connexion pool")
|
||||
make_pool()
|
||||
else:
|
||||
raise error
|
||||
finally:
|
||||
if config('PG_POOL', cast=bool, default=True) \
|
||||
and not self.long_query \
|
||||
and not self.unlimited_query:
|
||||
postgreSQL_pool.putconn(self.connection)
|
||||
|
||||
def recreate_cursor(self, rollback=False):
|
||||
if rollback:
|
||||
try:
|
||||
self.connection.rollback()
|
||||
except Exception as error:
|
||||
logging.error("Error while rollbacking connection for recreation", error)
|
||||
try:
|
||||
self.cursor.close()
|
||||
except Exception as error:
|
||||
logging.error("Error while closing cursor for recreation", error)
|
||||
self.cursor = None
|
||||
return self.__enter__()
|
||||
|
||||
|
||||
async def init():
|
||||
logging.info(f">PG_POOL:{config('PG_POOL', default=None)}")
|
||||
if config('PG_POOL', cast=bool, default=True):
|
||||
make_pool()
|
||||
|
||||
|
||||
async def terminate():
|
||||
global postgreSQL_pool
|
||||
if postgreSQL_pool is not None:
|
||||
try:
|
||||
postgreSQL_pool.closeall()
|
||||
logging.info("Closed all connexions to PostgreSQL")
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logging.error("Error while closing all connexions to PostgreSQL", error)
|
||||
11
ee/recommendation/signals.sql
Normal file
@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS frontend_signals
(
    project_id bigint NOT NULL,
    user_id    text   NOT NULL,
    timestamp  bigint NOT NULL,
    action     text   NOT NULL,
    source     text   NOT NULL,
    category   text   NOT NULL,
    data       json
);
CREATE INDEX IF NOT EXISTS frontend_signals_user_id_idx ON frontend_signals (user_id);
20
ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql
Normal file
@@ -0,0 +1,20 @@
BEGIN;
CREATE OR REPLACE FUNCTION openreplay_version()
    RETURNS text AS
$$
SELECT 'v1.10.0-ee'
$$ LANGUAGE sql IMMUTABLE;

CREATE TABLE IF NOT EXISTS frontend_signals
(
    project_id bigint  NOT NULL,
    user_id    integer NOT NULL references users (user_id) ON DELETE CASCADE,
    timestamp  bigint  NOT NULL,
    action     text    NOT NULL,
    source     text    NOT NULL,
    category   text    NOT NULL,
    data       jsonb
);
CREATE INDEX IF NOT EXISTS frontend_signals_user_id_idx ON frontend_signals (user_id);

COMMIT;
@@ -647,6 +647,19 @@ $$
CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id);


CREATE TABLE IF NOT EXISTS frontend_signals
(
    project_id bigint NOT NULL,
    user_id    text   NOT NULL,
    timestamp  bigint NOT NULL,
    action     text   NOT NULL,
    source     text   NOT NULL,
    category   text   NOT NULL,
    data       json
);
CREATE INDEX IF NOT EXISTS frontend_signals_user_id_idx ON frontend_signals (user_id);


CREATE TABLE IF NOT EXISTS assigned_sessions
(
    session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
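As a rough illustration of how the new table can be read back (not part of the commit; it reuses the `pg_client` helper shown elsewhere in this diff and the column names defined above):

```python
from chalicelib.utils import pg_client


def latest_signals(project_id: int, user_id, limit: int = 50):
    # Returns the most recent frontend signals recorded for a user in a project.
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(
            """SELECT timestamp, action, source, category, data
                 FROM public.frontend_signals
                WHERE project_id = %(project_id)s AND user_id = %(user_id)s
                ORDER BY timestamp DESC
                LIMIT %(limit)s;""",
            {"project_id": project_id, "user_id": user_id, "limit": limit}))
        return cur.fetchall()
```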
@@ -2,13 +2,10 @@ import { configure, addDecorator } from '@storybook/react';
import { Provider } from 'react-redux';
import store from '../app/store';
import { MemoryRouter } from "react-router"
-import { PlayerProvider } from '../app/player/store'

const withProvider = (story) => (
  <Provider store={store}>
-    <PlayerProvider>
      { story() }
-    </PlayerProvider>
  </Provider>
)

@@ -33,4 +30,4 @@ configure(
    require.context('../app', true, /\.stories\.js$/),
  ],
  module
-);
+);
@@ -14,9 +14,13 @@ COPY nginx.conf /etc/nginx/conf.d/default.conf
# Default step in docker build
FROM nginx:alpine
LABEL maintainer=Rajesh<rajesh@openreplay.com>
+ARG GIT_SHA
+LABEL GIT_SHA=$GIT_SHA
COPY --from=builder /work/public /var/www/openreplay
COPY nginx.conf /etc/nginx/conf.d/default.conf

+ENV GIT_SHA=$GIT_SHA
+
EXPOSE 8080
RUN chown -R nginx:nginx /var/cache/nginx && \
    chown -R nginx:nginx /var/log/nginx && \
@ -6,7 +6,6 @@ import { Notification } from 'UI';
|
|||
import { Loader } from 'UI';
|
||||
import { fetchUserInfo } from 'Duck/user';
|
||||
import withSiteIdUpdater from 'HOCs/withSiteIdUpdater';
|
||||
import WidgetViewPure from 'Components/Dashboard/components/WidgetView';
|
||||
import Header from 'Components/Header/Header';
|
||||
import { fetchList as fetchSiteList } from 'Duck/site';
|
||||
import { fetchList as fetchAnnouncements } from 'Duck/announcements';
|
||||
|
|
@ -37,6 +36,7 @@ const ErrorsPure = lazy(() => import('Components/Errors/Errors'));
|
|||
const FunnelDetailsPure = lazy(() => import('Components/Funnels/FunnelDetails'));
|
||||
const FunnelIssueDetails = lazy(() => import('Components/Funnels/FunnelIssueDetails'));
|
||||
const FunnelPagePure = lazy(() => import('Components/Funnels/FunnelPage'));
|
||||
const MultiviewPure = lazy(() => import('Components/Session_/Multiview/Multiview.tsx'));
|
||||
|
||||
const BugFinder = withSiteIdUpdater(BugFinderPure);
|
||||
const Dashboard = withSiteIdUpdater(DashboardPure);
|
||||
|
|
@ -49,6 +49,7 @@ const Errors = withSiteIdUpdater(ErrorsPure);
|
|||
const FunnelPage = withSiteIdUpdater(FunnelPagePure);
|
||||
const FunnelsDetails = withSiteIdUpdater(FunnelDetailsPure);
|
||||
const FunnelIssue = withSiteIdUpdater(FunnelIssueDetails);
|
||||
const Multiview = withSiteIdUpdater(MultiviewPure)
|
||||
const withSiteId = routes.withSiteId;
|
||||
|
||||
const METRICS_PATH = routes.metrics();
|
||||
|
|
@ -67,6 +68,7 @@ const DASHBOARD_METRIC_DETAILS_PATH = routes.dashboardMetricDetails();
|
|||
// const WIDGET_PATAH = routes.dashboardMetric();
|
||||
const SESSIONS_PATH = routes.sessions();
|
||||
const ASSIST_PATH = routes.assist();
|
||||
const RECORDINGS_PATH = routes.recordings();
|
||||
const ERRORS_PATH = routes.errors();
|
||||
const ERROR_PATH = routes.error();
|
||||
const FUNNEL_PATH = routes.funnels();
|
||||
|
|
@ -80,6 +82,8 @@ const FORGOT_PASSWORD = routes.forgotPassword();
|
|||
const CLIENT_PATH = routes.client();
|
||||
const ONBOARDING_PATH = routes.onboarding();
|
||||
const ONBOARDING_REDIRECT_PATH = routes.onboarding(OB_DEFAULT_TAB);
|
||||
const MULTIVIEW_PATH = routes.multiview();
|
||||
const MULTIVIEW_INDEX_PATH = routes.multiviewIndex();
|
||||
|
||||
@withStore
|
||||
@withRouter
|
||||
|
|
@ -170,8 +174,14 @@ class Router extends React.Component {
|
|||
render() {
|
||||
const { isLoggedIn, jwt, siteId, sites, loading, changePassword, location, existingTenant, onboarding, isEnterprise } = this.props;
|
||||
const siteIdList = sites.map(({ id }) => id).toJS();
|
||||
const hideHeader = (location.pathname && location.pathname.includes('/session/')) || location.pathname.includes('/assist/');
|
||||
const isPlayer = isRoute(SESSION_PATH, location.pathname) || isRoute(LIVE_SESSION_PATH, location.pathname);
|
||||
const hideHeader = (location.pathname && location.pathname.includes('/session/'))
|
||||
|| location.pathname.includes('/assist/')
|
||||
|| location.pathname.includes('multiview');
|
||||
const isPlayer = isRoute(SESSION_PATH, location.pathname)
|
||||
|| isRoute(LIVE_SESSION_PATH, location.pathname)
|
||||
|| isRoute(MULTIVIEW_PATH, location.pathname)
|
||||
|| isRoute(MULTIVIEW_INDEX_PATH, location.pathname);
|
||||
|
||||
const redirectToOnboarding = !onboarding && localStorage.getItem(GLOBAL_HAS_NO_RECORDINGS) === 'true'
|
||||
|
||||
return isLoggedIn ? (
|
||||
|
|
@ -194,6 +204,12 @@ class Router extends React.Component {
|
|||
state: tenantId,
|
||||
});
|
||||
break;
|
||||
case '/integrations/msteams':
|
||||
client.post('integrations/msteams/add', {
|
||||
code: location.search.split('=')[1],
|
||||
state: tenantId,
|
||||
});
|
||||
break;
|
||||
}
|
||||
return <Redirect to={CLIENT_PATH} />;
|
||||
}}
|
||||
|
|
@ -212,7 +228,10 @@ class Router extends React.Component {
|
|||
<Route exact strict path={withSiteId(DASHBOARD_METRIC_CREATE_PATH, siteIdList)} component={Dashboard} />
|
||||
<Route exact strict path={withSiteId(DASHBOARD_METRIC_DETAILS_PATH, siteIdList)} component={Dashboard} />
|
||||
|
||||
<Route exact path={withSiteId(MULTIVIEW_INDEX_PATH, siteIdList)} component={Multiview} />
|
||||
<Route path={withSiteId(MULTIVIEW_PATH, siteIdList)} component={Multiview} />
|
||||
<Route exact strict path={withSiteId(ASSIST_PATH, siteIdList)} component={Assist} />
|
||||
<Route exact strict path={withSiteId(RECORDINGS_PATH, siteIdList)} component={Assist} />
|
||||
<Route exact strict path={withSiteId(ERRORS_PATH, siteIdList)} component={Errors} />
|
||||
<Route exact strict path={withSiteId(ERROR_PATH, siteIdList)} component={Errors} />
|
||||
<Route exact strict path={withSiteId(FUNNEL_PATH, siteIdList)} component={FunnelPage} />
|
||||
|
|
|
|||
|
|
@@ -11,6 +11,7 @@ const siteIdRequiredPaths = [
  '/metadata',
  '/integrations/sentry/events',
  '/integrations/slack/notify',
+  '/integrations/msteams/notify',
  '/assignments',
  '/integration/sources',
  '/issue_types',
@@ -94,7 +95,16 @@ export default class APIClient {
    ) {
      edp = `${ edp }/${ this.siteId }`
    }
-    return fetch(edp + path, this.init);
+    return fetch(edp + path, this.init)
+      .then(response => {
+        if (response.ok) {
+          return response
+        } else {
+          throw new Error(
+            `! ${this.init.method} error on ${path}; ${response.status}`
+          )
+        }
+      })
  }

  get(path, params, options) {
44
frontend/app/assets/integrations/teams.svg
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-334.32495 -518.3335 2897.4829 3110.001">
|
||||
<path
|
||||
d="M1554.637 777.5h575.713c54.391 0 98.483 44.092 98.483 98.483v524.398c0 199.901-162.051 361.952-361.952 361.952h-1.711c-199.901.028-361.975-162-362.004-361.901V828.971c.001-28.427 23.045-51.471 51.471-51.471z"
|
||||
fill="#5059C9" />
|
||||
<circle r="233.25" cy="440.583" cx="1943.75" fill="#5059C9" />
|
||||
<circle r="336.917" cy="336.917" cx="1218.083" fill="#7B83EB" />
|
||||
<path
|
||||
d="M1667.323 777.5H717.01c-53.743 1.33-96.257 45.931-95.01 99.676v598.105c-7.505 322.519 247.657 590.16 570.167 598.053 322.51-7.893 577.671-275.534 570.167-598.053V877.176c1.245-53.745-41.268-98.346-95.011-99.676z"
|
||||
fill="#7B83EB" />
|
||||
<path
|
||||
d="M1244 777.5v838.145c-.258 38.435-23.549 72.964-59.09 87.598a91.856 91.856 0 01-35.765 7.257H667.613c-6.738-17.105-12.958-34.21-18.142-51.833a631.287 631.287 0 01-27.472-183.49V877.02c-1.246-53.659 41.198-98.19 94.855-99.52z"
|
||||
opacity=".1" />
|
||||
<path
|
||||
d="M1192.167 777.5v889.978a91.802 91.802 0 01-7.257 35.765c-14.634 35.541-49.163 58.833-87.598 59.09H691.975c-8.812-17.105-17.105-34.21-24.362-51.833-7.257-17.623-12.958-34.21-18.142-51.833a631.282 631.282 0 01-27.472-183.49V877.02c-1.246-53.659 41.198-98.19 94.855-99.52z"
|
||||
opacity=".2" />
|
||||
<path
|
||||
d="M1192.167 777.5v786.312c-.395 52.223-42.632 94.46-94.855 94.855h-447.84A631.282 631.282 0 01622 1475.177V877.02c-1.246-53.659 41.198-98.19 94.855-99.52z"
|
||||
opacity=".2" />
|
||||
<path
|
||||
d="M1140.333 777.5v786.312c-.395 52.223-42.632 94.46-94.855 94.855H649.472A631.282 631.282 0 01622 1475.177V877.02c-1.246-53.659 41.198-98.19 94.855-99.52z"
|
||||
opacity=".2" />
|
||||
<path
|
||||
d="M1244 509.522v163.275c-8.812.518-17.105 1.037-25.917 1.037-8.812 0-17.105-.518-25.917-1.037a284.472 284.472 0 01-51.833-8.293c-104.963-24.857-191.679-98.469-233.25-198.003a288.02 288.02 0 01-16.587-51.833h258.648c52.305.198 94.657 42.549 94.856 94.854z"
|
||||
opacity=".1" />
|
||||
<path
|
||||
d="M1192.167 561.355v111.442a284.472 284.472 0 01-51.833-8.293c-104.963-24.857-191.679-98.469-233.25-198.003h190.228c52.304.198 94.656 42.55 94.855 94.854z"
|
||||
opacity=".2" />
|
||||
<path
|
||||
d="M1192.167 561.355v111.442a284.472 284.472 0 01-51.833-8.293c-104.963-24.857-191.679-98.469-233.25-198.003h190.228c52.304.198 94.656 42.55 94.855 94.854z"
|
||||
opacity=".2" />
|
||||
<path
|
||||
d="M1140.333 561.355v103.148c-104.963-24.857-191.679-98.469-233.25-198.003h138.395c52.305.199 94.656 42.551 94.855 94.855z"
|
||||
opacity=".2" />
|
||||
<linearGradient gradientTransform="matrix(1 0 0 -1 0 2075.333)" y2="394.261" x2="942.234" y1="1683.073" x1="198.099"
|
||||
gradientUnits="userSpaceOnUse" id="a">
|
||||
<stop offset="0" stop-color="#5a62c3" />
|
||||
<stop offset=".5" stop-color="#4d55bd" />
|
||||
<stop offset="1" stop-color="#3940ab" />
|
||||
</linearGradient>
|
||||
<path
|
||||
d="M95.01 466.5h950.312c52.473 0 95.01 42.538 95.01 95.01v950.312c0 52.473-42.538 95.01-95.01 95.01H95.01c-52.473 0-95.01-42.538-95.01-95.01V561.51c0-52.472 42.538-95.01 95.01-95.01z"
|
||||
fill="url(#a)" />
|
||||
<path d="M820.211 828.193h-189.97v517.297h-121.03V828.193H320.123V727.844h500.088z" fill="#FFF" />
|
||||
</svg>
|
||||
|
|
|
@@ -1,6 +1,5 @@
|
|||
import React, { useEffect } from 'react';
|
||||
import { Button, Form, Input, SegmentSelection, Checkbox, Message, Link, Icon } from 'UI';
|
||||
import { alertMetrics as metrics } from 'App/constants';
|
||||
import { Button, Form, Input, SegmentSelection, Checkbox, Icon } from 'UI';
|
||||
import { alertConditions as conditions } from 'App/constants';
|
||||
import { client, CLIENT_TABS } from 'App/routes';
|
||||
import { connect } from 'react-redux';
|
||||
|
|
@@ -12,319 +11,381 @@ import { fetchTriggerOptions } from 'Duck/alerts';
|
|||
import Select from 'Shared/Select';
|
||||
|
||||
const thresholdOptions = [
|
||||
{ label: '15 minutes', value: 15 },
|
||||
{ label: '30 minutes', value: 30 },
|
||||
{ label: '1 hour', value: 60 },
|
||||
{ label: '2 hours', value: 120 },
|
||||
{ label: '4 hours', value: 240 },
|
||||
{ label: '1 day', value: 1440 },
|
||||
{ label: '15 minutes', value: 15 },
|
||||
{ label: '30 minutes', value: 30 },
|
||||
{ label: '1 hour', value: 60 },
|
||||
{ label: '2 hours', value: 120 },
|
||||
{ label: '4 hours', value: 240 },
|
||||
{ label: '1 day', value: 1440 },
|
||||
];
|
||||
|
||||
const changeOptions = [
|
||||
{ label: 'change', value: 'change' },
|
||||
{ label: '% change', value: 'percent' },
|
||||
{ label: 'change', value: 'change' },
|
||||
{ label: '% change', value: 'percent' },
|
||||
];
|
||||
|
||||
const Circle = ({ text }) => <div className="circle mr-4 w-6 h-6 rounded-full bg-gray-light flex items-center justify-center">{text}</div>;
|
||||
const Circle = ({ text }) => (
|
||||
<div className="circle mr-4 w-6 h-6 rounded-full bg-gray-light flex items-center justify-center">
|
||||
{text}
|
||||
</div>
|
||||
);
|
||||
|
||||
const Section = ({ index, title, description, content }) => (
|
||||
<div className="w-full">
|
||||
<div className="flex items-start">
|
||||
<Circle text={index} />
|
||||
<div>
|
||||
<span className="font-medium">{title}</span>
|
||||
{description && <div className="text-sm color-gray-medium">{description}</div>}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="ml-10">{content}</div>
|
||||
<div className="w-full">
|
||||
<div className="flex items-start">
|
||||
<Circle text={index} />
|
||||
<div>
|
||||
<span className="font-medium">{title}</span>
|
||||
{description && <div className="text-sm color-gray-medium">{description}</div>}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="ml-10">{content}</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
const integrationsRoute = client(CLIENT_TABS.INTEGRATIONS);
|
||||
|
||||
const AlertForm = (props) => {
|
||||
const {
|
||||
instance,
|
||||
slackChannels,
|
||||
webhooks,
|
||||
loading,
|
||||
onDelete,
|
||||
deleting,
|
||||
triggerOptions,
|
||||
metricId,
|
||||
style = { width: '580px', height: '100vh' },
|
||||
} = props;
|
||||
const write = ({ target: { value, name } }) => props.edit({ [name]: value });
|
||||
const writeOption = (e, { name, value }) => props.edit({ [name]: value.value });
|
||||
const onChangeCheck = ({ target: { checked, name } }) => props.edit({ [name]: checked });
|
||||
// const onChangeOption = ({ checked, name }) => props.edit({ [ name ]: checked })
|
||||
// const onChangeCheck = (e) => { console.log(e) }
|
||||
const {
|
||||
instance,
|
||||
slackChannels,
|
||||
msTeamsChannels,
|
||||
webhooks,
|
||||
loading,
|
||||
onDelete,
|
||||
deleting,
|
||||
triggerOptions,
|
||||
style = { width: '580px', height: '100vh' },
|
||||
} = props;
|
||||
const write = ({ target: { value, name } }) => props.edit({ [name]: value });
|
||||
const writeOption = (e, { name, value }) => props.edit({ [name]: value.value });
|
||||
const onChangeCheck = ({ target: { checked, name } }) => props.edit({ [name]: checked });
|
||||
// const onChangeOption = ({ checked, name }) => props.edit({ [ name ]: checked })
|
||||
// const onChangeCheck = (e) => { console.log(e) }
|
||||
|
||||
useEffect(() => {
|
||||
props.fetchTriggerOptions();
|
||||
}, []);
|
||||
useEffect(() => {
|
||||
props.fetchTriggerOptions();
|
||||
}, []);
|
||||
|
||||
const writeQueryOption = (e, { name, value }) => {
|
||||
const { query } = instance;
|
||||
props.edit({ query: { ...query, [name]: value } });
|
||||
};
|
||||
const writeQueryOption = (e, { name, value }) => {
|
||||
const { query } = instance;
|
||||
props.edit({ query: { ...query, [name]: value } });
|
||||
};
|
||||
|
||||
const writeQuery = ({ target: { value, name } }) => {
|
||||
const { query } = instance;
|
||||
props.edit({ query: { ...query, [name]: value } });
|
||||
};
|
||||
const writeQuery = ({ target: { value, name } }) => {
|
||||
const { query } = instance;
|
||||
props.edit({ query: { ...query, [name]: value } });
|
||||
};
|
||||
|
||||
const metric = instance && instance.query.left ? triggerOptions.find((i) => i.value === instance.query.left) : null;
|
||||
const unit = metric ? metric.unit : '';
|
||||
const isThreshold = instance.detectionMethod === 'threshold';
|
||||
const metric =
|
||||
instance && instance.query.left
|
||||
? triggerOptions.find((i) => i.value === instance.query.left)
|
||||
: null;
|
||||
const unit = metric ? metric.unit : '';
|
||||
const isThreshold = instance.detectionMethod === 'threshold';
|
||||
|
||||
return (
|
||||
<Form className={cn('p-6 pb-10', stl.wrapper)} style={style} onSubmit={() => props.onSubmit(instance)} id="alert-form">
|
||||
<div className={cn(stl.content, '-mx-6 px-6 pb-12')}>
|
||||
<input
|
||||
autoFocus={true}
|
||||
className="text-lg border border-gray-light rounded w-full"
|
||||
name="name"
|
||||
style={{ fontSize: '18px', padding: '10px', fontWeight: '600' }}
|
||||
value={instance && instance.name}
|
||||
onChange={write}
|
||||
placeholder="Untiltled Alert"
|
||||
id="name-field"
|
||||
/>
|
||||
<div className="mb-8" />
|
||||
<Section
|
||||
index="1"
|
||||
title={'What kind of alert do you want to set?'}
|
||||
content={
|
||||
<div>
|
||||
<SegmentSelection
|
||||
primary
|
||||
name="detectionMethod"
|
||||
className="my-3"
|
||||
onSelect={(e, { name, value }) => props.edit({ [name]: value })}
|
||||
value={{ value: instance.detectionMethod }}
|
||||
list={[
|
||||
{ name: 'Threshold', value: 'threshold' },
|
||||
{ name: 'Change', value: 'change' },
|
||||
]}
|
||||
/>
|
||||
<div className="text-sm color-gray-medium">
|
||||
{isThreshold && 'E.g. Alert me if memory.avg is greater than 500MB over the past 4 hours.'}
|
||||
{!isThreshold &&
|
||||
'E.g. Alert me if % change of memory.avg is greater than 10% over the past 4 hours compared to the previous 4 hours.'}
|
||||
</div>
|
||||
<div className="my-4" />
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
|
||||
<hr className="my-8" />
|
||||
|
||||
<Section
|
||||
index="2"
|
||||
title="Condition"
|
||||
content={
|
||||
<div>
|
||||
{!isThreshold && (
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'Trigger when'}</label>
|
||||
<Select
|
||||
className="w-4/6"
|
||||
placeholder="change"
|
||||
options={changeOptions}
|
||||
name="change"
|
||||
defaultValue={instance.change}
|
||||
onChange={({ value }) => writeOption(null, { name: 'change', value })}
|
||||
id="change-dropdown"
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{isThreshold ? 'Trigger when' : 'of'}</label>
|
||||
<Select
|
||||
className="w-4/6"
|
||||
placeholder="Select Metric"
|
||||
isSearchable={true}
|
||||
options={triggerOptions}
|
||||
name="left"
|
||||
value={triggerOptions.find((i) => i.value === instance.query.left)}
|
||||
// onChange={ writeQueryOption }
|
||||
onChange={({ value }) => writeQueryOption(null, { name: 'left', value: value.value })}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'is'}</label>
|
||||
<div className="w-4/6 flex items-center">
|
||||
<Select
|
||||
placeholder="Select Condition"
|
||||
options={conditions}
|
||||
name="operator"
|
||||
defaultValue={instance.query.operator}
|
||||
// onChange={ writeQueryOption }
|
||||
onChange={({ value }) => writeQueryOption(null, { name: 'operator', value: value.value })}
|
||||
/>
|
||||
{unit && (
|
||||
<>
|
||||
<Input
|
||||
className="px-4"
|
||||
style={{ marginRight: '31px' }}
|
||||
// label={{ basic: true, content: unit }}
|
||||
// labelPosition='right'
|
||||
name="right"
|
||||
value={instance.query.right}
|
||||
onChange={writeQuery}
|
||||
placeholder="E.g. 3"
|
||||
/>
|
||||
<span className="ml-2">{'test'}</span>
|
||||
</>
|
||||
)}
|
||||
{!unit && (
|
||||
<Input
|
||||
wrapperClassName="ml-2"
|
||||
// className="pl-4"
|
||||
name="right"
|
||||
value={instance.query.right}
|
||||
onChange={writeQuery}
|
||||
placeholder="Specify value"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'over the past'}</label>
|
||||
<Select
|
||||
className="w-2/6"
|
||||
placeholder="Select timeframe"
|
||||
options={thresholdOptions}
|
||||
name="currentPeriod"
|
||||
defaultValue={instance.currentPeriod}
|
||||
// onChange={ writeOption }
|
||||
onChange={({ value }) => writeOption(null, { name: 'currentPeriod', value })}
|
||||
/>
|
||||
</div>
|
||||
{!isThreshold && (
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'compared to previous'}</label>
|
||||
<Select
|
||||
className="w-2/6"
|
||||
placeholder="Select timeframe"
|
||||
options={thresholdOptions}
|
||||
name="previousPeriod"
|
||||
defaultValue={instance.previousPeriod}
|
||||
// onChange={ writeOption }
|
||||
onChange={({ value }) => writeOption(null, { name: 'previousPeriod', value })}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
|
||||
<hr className="my-8" />
|
||||
|
||||
<Section
|
||||
index="3"
|
||||
title="Notify Through"
|
||||
description="You'll be noticed in app notifications. Additionally opt in to receive alerts on:"
|
||||
content={
|
||||
<div className="flex flex-col">
|
||||
<div className="flex items-center my-4">
|
||||
<Checkbox
|
||||
name="slack"
|
||||
className="mr-8"
|
||||
type="checkbox"
|
||||
checked={instance.slack}
|
||||
onClick={onChangeCheck}
|
||||
label="Slack"
|
||||
/>
|
||||
<Checkbox
|
||||
name="email"
|
||||
type="checkbox"
|
||||
checked={instance.email}
|
||||
onClick={onChangeCheck}
|
||||
className="mr-8"
|
||||
label="Email"
|
||||
/>
|
||||
<Checkbox name="webhook" type="checkbox" checked={instance.webhook} onClick={onChangeCheck} label="Webhook" />
|
||||
</div>
|
||||
|
||||
{instance.slack && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Slack'}</label>
|
||||
<div className="w-4/6">
|
||||
<DropdownChips
|
||||
fluid
|
||||
selected={instance.slackInput}
|
||||
options={slackChannels}
|
||||
placeholder="Select Channel"
|
||||
onChange={(selected) => props.edit({ slackInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{instance.email && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Email'}</label>
|
||||
<div className="w-4/6">
|
||||
<DropdownChips
|
||||
textFiled
|
||||
validate={validateEmail}
|
||||
selected={instance.emailInput}
|
||||
placeholder="Type and press Enter key"
|
||||
onChange={(selected) => props.edit({ emailInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{instance.webhook && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Webhook'}</label>
|
||||
<DropdownChips
|
||||
fluid
|
||||
selected={instance.webhookInput}
|
||||
options={webhooks}
|
||||
placeholder="Select Webhook"
|
||||
onChange={(selected) => props.edit({ webhookInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
return (
|
||||
<Form
|
||||
className={cn('p-6 pb-10', stl.wrapper)}
|
||||
style={style}
|
||||
onSubmit={() => props.onSubmit(instance)}
|
||||
id="alert-form"
|
||||
>
|
||||
<div className={cn(stl.content, '-mx-6 px-6 pb-12')}>
|
||||
<input
|
||||
autoFocus={true}
|
||||
className="text-lg border border-gray-light rounded w-full"
|
||||
name="name"
|
||||
style={{ fontSize: '18px', padding: '10px', fontWeight: '600' }}
|
||||
value={instance && instance.name}
|
||||
onChange={write}
|
||||
placeholder="Untiltled Alert"
|
||||
id="name-field"
|
||||
/>
|
||||
<div className="mb-8" />
|
||||
<Section
|
||||
index="1"
|
||||
title={'What kind of alert do you want to set?'}
|
||||
content={
|
||||
<div>
|
||||
<SegmentSelection
|
||||
primary
|
||||
name="detectionMethod"
|
||||
className="my-3"
|
||||
onSelect={(e, { name, value }) => props.edit({ [name]: value })}
|
||||
value={{ value: instance.detectionMethod }}
|
||||
list={[
|
||||
{ name: 'Threshold', value: 'threshold' },
|
||||
{ name: 'Change', value: 'change' },
|
||||
]}
|
||||
/>
|
||||
<div className="text-sm color-gray-medium">
|
||||
{isThreshold &&
|
||||
'E.g. Alert me if memory.avg is greater than 500MB over the past 4 hours.'}
|
||||
{!isThreshold &&
|
||||
'E.g. Alert me if % change of memory.avg is greater than 10% over the past 4 hours compared to the previous 4 hours.'}
|
||||
</div>
|
||||
<div className="my-4" />
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
|
||||
<div className="flex items-center justify-between absolute bottom-0 left-0 right-0 p-6 border-t z-10 bg-white">
|
||||
<div className="flex items-center">
|
||||
<Button loading={loading} variant="primary" type="submit" disabled={loading || !instance.validate()} id="submit-button">
|
||||
{instance.exists() ? 'Update' : 'Create'}
|
||||
</Button>
|
||||
<div className="mx-1" />
|
||||
<Button onClick={props.onClose}>Cancel</Button>
|
||||
<hr className="my-8" />
|
||||
|
||||
<Section
|
||||
index="2"
|
||||
title="Condition"
|
||||
content={
|
||||
<div>
|
||||
{!isThreshold && (
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'Trigger when'}</label>
|
||||
<Select
|
||||
className="w-4/6"
|
||||
placeholder="change"
|
||||
options={changeOptions}
|
||||
name="change"
|
||||
defaultValue={instance.change}
|
||||
onChange={({ value }) => writeOption(null, { name: 'change', value })}
|
||||
id="change-dropdown"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
{instance.exists() && (
|
||||
<Button hover variant="text" loading={deleting} type="button" onClick={() => onDelete(instance)} id="trash-button">
|
||||
<Icon name="trash" color="gray-medium" size="18" />
|
||||
</Button>
|
||||
)}
|
||||
)}
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">
|
||||
{isThreshold ? 'Trigger when' : 'of'}
|
||||
</label>
|
||||
<Select
|
||||
className="w-4/6"
|
||||
placeholder="Select Metric"
|
||||
isSearchable={true}
|
||||
options={triggerOptions}
|
||||
name="left"
|
||||
value={triggerOptions.find((i) => i.value === instance.query.left)}
|
||||
// onChange={ writeQueryOption }
|
||||
onChange={({ value }) =>
|
||||
writeQueryOption(null, { name: 'left', value: value.value })
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'is'}</label>
|
||||
<div className="w-4/6 flex items-center">
|
||||
<Select
|
||||
placeholder="Select Condition"
|
||||
options={conditions}
|
||||
name="operator"
|
||||
defaultValue={instance.query.operator}
|
||||
// onChange={ writeQueryOption }
|
||||
onChange={({ value }) =>
|
||||
writeQueryOption(null, { name: 'operator', value: value.value })
|
||||
}
|
||||
/>
|
||||
{unit && (
|
||||
<>
|
||||
<Input
|
||||
className="px-4"
|
||||
style={{ marginRight: '31px' }}
|
||||
// label={{ basic: true, content: unit }}
|
||||
// labelPosition='right'
|
||||
name="right"
|
||||
value={instance.query.right}
|
||||
onChange={writeQuery}
|
||||
placeholder="E.g. 3"
|
||||
/>
|
||||
<span className="ml-2">{'test'}</span>
|
||||
</>
|
||||
)}
|
||||
{!unit && (
|
||||
<Input
|
||||
wrapperClassName="ml-2"
|
||||
// className="pl-4"
|
||||
name="right"
|
||||
value={instance.query.right}
|
||||
onChange={writeQuery}
|
||||
placeholder="Specify value"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">{'over the past'}</label>
|
||||
<Select
|
||||
className="w-2/6"
|
||||
placeholder="Select timeframe"
|
||||
options={thresholdOptions}
|
||||
name="currentPeriod"
|
||||
defaultValue={instance.currentPeriod}
|
||||
// onChange={ writeOption }
|
||||
onChange={({ value }) => writeOption(null, { name: 'currentPeriod', value })}
|
||||
/>
|
||||
</div>
|
||||
{!isThreshold && (
|
||||
<div className="flex items-center my-3">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal">
|
||||
{'compared to previous'}
|
||||
</label>
|
||||
<Select
|
||||
className="w-2/6"
|
||||
placeholder="Select timeframe"
|
||||
options={thresholdOptions}
|
||||
name="previousPeriod"
|
||||
defaultValue={instance.previousPeriod}
|
||||
// onChange={ writeOption }
|
||||
onChange={({ value }) => writeOption(null, { name: 'previousPeriod', value })}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Form>
|
||||
);
|
||||
}
|
||||
/>
|
||||
|
||||
<hr className="my-8" />
|
||||
|
||||
<Section
|
||||
index="3"
|
||||
title="Notify Through"
|
||||
description="You'll be noticed in app notifications. Additionally opt in to receive alerts on:"
|
||||
content={
|
||||
<div className="flex flex-col">
|
||||
<div className="flex items-center my-4">
|
||||
<Checkbox
|
||||
name="slack"
|
||||
className="mr-8"
|
||||
type="checkbox"
|
||||
checked={instance.slack}
|
||||
onClick={onChangeCheck}
|
||||
label="Slack"
|
||||
/>
|
||||
<Checkbox
|
||||
name="msteams"
|
||||
className="mr-8"
|
||||
type="checkbox"
|
||||
checked={instance.msteams}
|
||||
onClick={onChangeCheck}
|
||||
label="MS Teams"
|
||||
/>
|
||||
<Checkbox
|
||||
name="email"
|
||||
type="checkbox"
|
||||
checked={instance.email}
|
||||
onClick={onChangeCheck}
|
||||
className="mr-8"
|
||||
label="Email"
|
||||
/>
|
||||
<Checkbox
|
||||
name="webhook"
|
||||
type="checkbox"
|
||||
checked={instance.webhook}
|
||||
onClick={onChangeCheck}
|
||||
label="Webhook"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{instance.slack && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Slack'}</label>
|
||||
<div className="w-4/6">
|
||||
<DropdownChips
|
||||
fluid
|
||||
selected={instance.slackInput}
|
||||
options={slackChannels}
|
||||
placeholder="Select Channel"
|
||||
onChange={(selected) => props.edit({ slackInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
{instance.msteams && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'MS Teams'}</label>
|
||||
<div className="w-4/6">
|
||||
<DropdownChips
|
||||
fluid
|
||||
selected={instance.msteamsInput}
|
||||
options={msTeamsChannels}
|
||||
placeholder="Select Channel"
|
||||
onChange={(selected) => props.edit({ msteamsInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{instance.email && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Email'}</label>
|
||||
<div className="w-4/6">
|
||||
<DropdownChips
|
||||
textFiled
|
||||
validate={validateEmail}
|
||||
selected={instance.emailInput}
|
||||
placeholder="Type and press Enter key"
|
||||
onChange={(selected) => props.edit({ emailInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{instance.webhook && (
|
||||
<div className="flex items-start my-4">
|
||||
<label className="w-2/6 flex-shrink-0 font-normal pt-2">{'Webhook'}</label>
|
||||
<DropdownChips
|
||||
fluid
|
||||
selected={instance.webhookInput}
|
||||
options={webhooks}
|
||||
placeholder="Select Webhook"
|
||||
onChange={(selected) => props.edit({ webhookInput: selected })}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between absolute bottom-0 left-0 right-0 p-6 border-t z-10 bg-white">
|
||||
<div className="flex items-center">
|
||||
<Button
|
||||
loading={loading}
|
||||
variant="primary"
|
||||
type="submit"
|
||||
disabled={loading || !instance.validate()}
|
||||
id="submit-button"
|
||||
>
|
||||
{instance.exists() ? 'Update' : 'Create'}
|
||||
</Button>
|
||||
<div className="mx-1" />
|
||||
<Button onClick={props.onClose}>Cancel</Button>
|
||||
</div>
|
||||
<div>
|
||||
{instance.exists() && (
|
||||
<Button
|
||||
hover
|
||||
variant="text"
|
||||
loading={deleting}
|
||||
type="button"
|
||||
onClick={() => onDelete(instance)}
|
||||
id="trash-button"
|
||||
>
|
||||
<Icon name="trash" color="gray-medium" size="18" />
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</Form>
|
||||
);
|
||||
};
|
||||
|
||||
export default connect(
|
||||
(state) => ({
|
||||
instance: state.getIn(['alerts', 'instance']),
|
||||
triggerOptions: state.getIn(['alerts', 'triggerOptions']),
|
||||
loading: state.getIn(['alerts', 'saveRequest', 'loading']),
|
||||
deleting: state.getIn(['alerts', 'removeRequest', 'loading']),
|
||||
}),
|
||||
{ fetchTriggerOptions }
|
||||
(state) => ({
|
||||
instance: state.getIn(['alerts', 'instance']),
|
||||
triggerOptions: state.getIn(['alerts', 'triggerOptions']),
|
||||
loading: state.getIn(['alerts', 'saveRequest', 'loading']),
|
||||
deleting: state.getIn(['alerts', 'removeRequest', 'loading']),
|
||||
}),
|
||||
{ fetchTriggerOptions }
|
||||
)(AlertForm);
|
||||
|
|
|
|||
|
|
@@ -17,6 +17,8 @@ const AlertItem = props => {
|
|||
|
||||
const getNotifyChannel = alert => {
|
||||
let str = '';
|
||||
if (alert.msteams)
|
||||
str = 'MS Teams'
|
||||
if (alert.slack)
|
||||
str = 'Slack';
|
||||
if (alert.email)
|
||||
|
|
@@ -36,7 +38,7 @@ const AlertItem = props => {
|
|||
className={cn(stl.wrapper, 'p-4 py-6 relative group cursor-pointer', { [stl.active]: active })}
|
||||
onClick={onEdit}
|
||||
id="alert-item"
|
||||
>
|
||||
>
|
||||
<AlertTypeLabel type={alert.detectionMethod} />
|
||||
<div className="capitalize font-medium">{alert.name}</div>
|
||||
<div className="mt-2 text-sm color-gray-medium">
|
||||
|
|
|
|||
|
|
@@ -1,26 +1,67 @@
|
|||
import React from 'react';
|
||||
import LiveSessionList from 'Shared/LiveSessionList';
|
||||
import LiveSessionSearch from 'Shared/LiveSessionSearch';
|
||||
import cn from 'classnames'
|
||||
import { withRouter, RouteComponentProps } from 'react-router-dom';
|
||||
import withPageTitle from 'HOCs/withPageTitle';
|
||||
import withPermissions from 'HOCs/withPermissions'
|
||||
// import SessionSearch from '../shared/SessionSearch';
|
||||
// import MainSearchBar from '../shared/MainSearchBar';
|
||||
import AssistSearchField from './AssistSearchField';
|
||||
import withPermissions from 'HOCs/withPermissions';
|
||||
import AssistRouter from './AssistRouter';
|
||||
import { SideMenuitem } from 'UI';
|
||||
import { withSiteId, assist, recordings } from 'App/routes';
|
||||
import { connect } from 'react-redux';
|
||||
|
||||
function Assist() {
|
||||
return (
|
||||
<div className="page-margin container-90 flex relative">
|
||||
interface Props extends RouteComponentProps {
|
||||
siteId: string;
|
||||
history: any;
|
||||
isEnterprise: boolean;
|
||||
}
|
||||
|
||||
function Assist(props: Props) {
|
||||
const { history, siteId, isEnterprise } = props;
|
||||
const isAssist = history.location.pathname.includes('assist');
|
||||
const isRecords = history.location.pathname.includes('recordings');
|
||||
|
||||
const redirect = (path: string) => {
|
||||
history.push(withSiteId(path, siteId));
|
||||
};
|
||||
if (isEnterprise) {
|
||||
return (
|
||||
<div className="page-margin container-90 flex relative">
|
||||
<div className="flex-1 flex">
|
||||
<div className={cn("w-full mx-auto")} style={{ maxWidth: '1300px'}}>
|
||||
<AssistSearchField />
|
||||
<LiveSessionSearch />
|
||||
<div className="my-4" />
|
||||
<LiveSessionList />
|
||||
<div className="side-menu">
|
||||
<SideMenuitem
|
||||
active={isAssist}
|
||||
id="menu-assist"
|
||||
title="Live Sessions"
|
||||
iconName="play-circle-bold"
|
||||
onClick={() => redirect(assist())}
|
||||
/>
|
||||
<SideMenuitem
|
||||
active={isRecords}
|
||||
id="menu-rec"
|
||||
title="Recordings"
|
||||
iconName="record-circle"
|
||||
onClick={() => redirect(recordings())}
|
||||
/>
|
||||
</div>
|
||||
<div className="side-menu-margined w-full">
|
||||
<AssistRouter />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="page-margin container-90 flex relative">
|
||||
<AssistRouter />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export default withPageTitle("Assist - OpenReplay")(withPermissions(['ASSIST_LIVE'])(Assist));
|
||||
const Cont = connect((state: any) => ({
|
||||
isEnterprise:
|
||||
state.getIn(['user', 'account', 'edition']) === 'ee' ||
|
||||
state.getIn(['user', 'authDetails', 'edition']) === 'ee',
|
||||
}))(Assist);
|
||||
|
||||
export default withPageTitle('Assist - OpenReplay')(
|
||||
withPermissions(['ASSIST_LIVE'])(withRouter(Cont))
|
||||
);
|
||||
|
|
|
|||
39
frontend/app/components/Assist/AssistRouter.tsx
Normal file
|
|
@@ -0,0 +1,39 @@
|
|||
import React from 'react';
|
||||
import { Switch, Route } from 'react-router';
|
||||
import { withRouter, RouteComponentProps } from 'react-router-dom';
|
||||
|
||||
import {
|
||||
assist,
|
||||
recordings,
|
||||
withSiteId,
|
||||
} from 'App/routes';
|
||||
import AssistView from './AssistView'
|
||||
import Recordings from './RecordingsList/Recordings'
|
||||
|
||||
interface Props extends RouteComponentProps {
|
||||
match: any;
|
||||
}
|
||||
|
||||
function AssistRouter(props: Props) {
|
||||
const {
|
||||
match: {
|
||||
params: { siteId },
|
||||
},
|
||||
} = props;
|
||||
|
||||
return (
|
||||
<div className="w-full">
|
||||
<Switch>
|
||||
<Route exact strict path={withSiteId(assist(), siteId)}>
|
||||
<AssistView />
|
||||
</Route>
|
||||
|
||||
<Route exact strict path={withSiteId(recordings(), siteId)}>
|
||||
<Recordings />
|
||||
</Route>
|
||||
</Switch>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default withRouter(AssistRouter);
|
||||
|
|
@@ -16,7 +16,7 @@ function AssistSearchField(props: Props) {
|
|||
const hasEvents = props.appliedFilter.filters.filter((i: any) => i.isEvent).size > 0;
|
||||
const hasFilters = props.appliedFilter.filters.filter((i: any) => !i.isEvent).size > 0;
|
||||
return (
|
||||
<div className="flex items-center">
|
||||
<div className="flex items-center w-full">
|
||||
<div style={{ width: '60%', marginRight: '10px' }}>
|
||||
<SessionSearchField fetchFilterSearch={props.fetchFilterSearch} addFilterByKeyAndValue={props.addFilterByKeyAndValue} />
|
||||
</div>
|
||||
|
|
|
|||
17
frontend/app/components/Assist/AssistView.tsx
Normal file
|
|
@@ -0,0 +1,17 @@
|
|||
import React from 'react';
|
||||
import LiveSessionList from 'Shared/LiveSessionList';
|
||||
import LiveSessionSearch from 'Shared/LiveSessionSearch';
|
||||
import AssistSearchField from './AssistSearchField';
|
||||
|
||||
function AssistView() {
|
||||
return (
|
||||
<div className="w-full mx-auto" style={{ maxWidth: '1300px'}}>
|
||||
<AssistSearchField />
|
||||
<LiveSessionSearch />
|
||||
<div className="my-4" />
|
||||
<LiveSessionList />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export default AssistView;
|
||||
|
|
@@ -6,7 +6,7 @@ import stl from './chatWindow.module.css';
|
|||
import ChatControls from '../ChatControls/ChatControls';
|
||||
import Draggable from 'react-draggable';
|
||||
import type { LocalStream } from 'Player';
|
||||
import { toggleVideoLocalStream } from 'Player'
|
||||
import { PlayerContext } from 'App/components/Session/playerContext';
|
||||
|
||||
export interface Props {
|
||||
incomeStream: MediaStream[] | null;
|
||||
|
|
@@ -17,6 +17,10 @@ export interface Props {
|
|||
}
|
||||
|
||||
function ChatWindow({ userId, incomeStream, localStream, endCall, isPrestart }: Props) {
|
||||
const { player } = React.useContext(PlayerContext)
|
||||
|
||||
const toggleVideoLocalStream = player.assistManager.toggleVideoLocalStream;
|
||||
|
||||
const [localVideoEnabled, setLocalVideoEnabled] = useState(false);
|
||||
const [anyRemoteEnabled, setRemoteEnabled] = useState(false);
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,103 @@
|
|||
import { useObserver } from 'mobx-react-lite';
|
||||
import React from 'react';
|
||||
import { Button, Modal, Form, Icon, Checkbox, Input } from 'UI';
|
||||
|
||||
interface Props {
|
||||
show: boolean;
|
||||
title: string;
|
||||
closeHandler?: () => void;
|
||||
onSave: (title: string) => void;
|
||||
}
|
||||
function EditRecordingModal(props: Props) {
|
||||
const { show, closeHandler, title, onSave } = props;
|
||||
const [text, setText] = React.useState(title)
|
||||
|
||||
React.useEffect(() => {
|
||||
const handleEsc = (e: any) => e.key === 'Escape' && closeHandler?.()
|
||||
document.addEventListener("keydown", handleEsc, false);
|
||||
return () => {
|
||||
document.removeEventListener("keydown", handleEsc, false);
|
||||
}
|
||||
}, [])
|
||||
|
||||
const write = ({ target: { value, name } }: any) => setText(value)
|
||||
|
||||
const save = () => {
|
||||
onSave(text)
|
||||
}
|
||||
return useObserver(() => (
|
||||
<Modal open={ show } onClose={closeHandler}>
|
||||
<Modal.Header className="flex items-center justify-between">
|
||||
<div>{ 'Edit Recording' }</div>
|
||||
<div onClick={ closeHandler }>
|
||||
<Icon
|
||||
color="gray-dark"
|
||||
size="14"
|
||||
name="close"
|
||||
/>
|
||||
</div>
|
||||
</Modal.Header>
|
||||
|
||||
<Modal.Content>
|
||||
<Form onSubmit={save}>
|
||||
<Form.Field>
|
||||
<label>{'Title:'}</label>
|
||||
<Input
|
||||
className=""
|
||||
name="name"
|
||||
value={ text }
|
||||
onChange={write}
|
||||
placeholder="Title"
|
||||
maxLength={100}
|
||||
autoFocus
|
||||
/>
|
||||
</Form.Field>
|
||||
|
||||
{/* <Form.Field>
|
||||
<label>{'Description:'}</label>
|
||||
<Input
|
||||
className=""
|
||||
type="textarea"
|
||||
name="description"
|
||||
value={ dashboard.description }
|
||||
onChange={write}
|
||||
placeholder="Description"
|
||||
maxLength={300}
|
||||
autoFocus={!focusTitle}
|
||||
/>
|
||||
</Form.Field>
|
||||
|
||||
<Form.Field>
|
||||
<div className="flex items-center">
|
||||
<Checkbox
|
||||
name="isPublic"
|
||||
className="font-medium mr-3"
|
||||
type="checkbox"
|
||||
checked={ dashboard.isPublic }
|
||||
onClick={ () => dashboard.update({ 'isPublic': !dashboard.isPublic }) }
|
||||
/>
|
||||
<div className="flex items-center cursor-pointer" onClick={ () => dashboard.update({ 'isPublic': !dashboard.isPublic }) }>
|
||||
<Icon name="user-friends" size="16" />
|
||||
<span className="ml-2"> Team can see and edit the dashboard.</span>
|
||||
</div>
|
||||
</div>
|
||||
</Form.Field> */}
|
||||
</Form>
|
||||
</Modal.Content>
|
||||
<Modal.Footer>
|
||||
<div className="-mx-2 px-2">
|
||||
<Button
|
||||
variant="primary"
|
||||
onClick={ save }
|
||||
className="float-left mr-2"
|
||||
>
|
||||
Save
|
||||
</Button>
|
||||
<Button className="mr-2" onClick={ closeHandler }>{ 'Cancel' }</Button>
|
||||
</div>
|
||||
</Modal.Footer>
|
||||
</Modal>
|
||||
));
|
||||
}
|
||||
|
||||
export default EditRecordingModal;
|
||||
44
frontend/app/components/Assist/RecordingsList/Recordings.tsx
Normal file
|
|
@@ -0,0 +1,44 @@
|
|||
import React from 'react';
|
||||
import { PageTitle } from 'UI';
|
||||
import Select from 'Shared/Select';
|
||||
import RecordingsSearch from './RecordingsSearch';
|
||||
import RecordingsList from './RecordingsList';
|
||||
import { useStore } from 'App/mstore';
|
||||
import { connect } from 'react-redux';
|
||||
|
||||
function Recordings({ userId }: { userId: string }) {
|
||||
const { recordingsStore } = useStore();
|
||||
|
||||
const recordingsOwner = [
|
||||
{ value: '0', label: 'All Recordings' },
|
||||
{ value: userId, label: 'My Recordings' },
|
||||
];
|
||||
|
||||
return (
|
||||
<div style={{ maxWidth: '1300px', margin: 'auto' }} className="bg-white rounded py-4 border">
|
||||
<div className="flex items-center mb-4 justify-between px-6">
|
||||
<div className="flex items-baseline mr-3">
|
||||
<PageTitle title="Recordings" />
|
||||
</div>
|
||||
<div className="ml-auto flex items-center">
|
||||
<Select
|
||||
name="recsOwner"
|
||||
plain
|
||||
right
|
||||
options={recordingsOwner}
|
||||
onChange={({ value }) => recordingsStore.setUserId(value.value)}
|
||||
defaultValue={recordingsOwner[0].value}
|
||||
/>
|
||||
<div className="ml-4 w-1/4" style={{ minWidth: 300 }}>
|
||||
<RecordingsSearch />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<RecordingsList />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default connect((state: any) => ({ userId: state.getIn(['user', 'account', 'id']) }))(
|
||||
Recordings
|
||||
);
|
||||
|
|
@@ -0,0 +1,73 @@
|
|||
import { observer } from 'mobx-react-lite';
|
||||
import React from 'react';
|
||||
import { NoContent, Pagination, Icon } from 'UI';
|
||||
import { useStore } from 'App/mstore';
|
||||
import { filterList } from 'App/utils';
|
||||
import { sliceListPerPage } from 'App/utils';
|
||||
import RecordsListItem from './RecordsListItem';
|
||||
|
||||
function RecordingsList() {
|
||||
const { recordingsStore } = useStore();
|
||||
const [shownRecordings, setRecordings] = React.useState([]);
|
||||
const recordings = recordingsStore.recordings;
|
||||
const recordsSearch = recordingsStore.search;
|
||||
|
||||
React.useEffect(() => {
|
||||
recordingsStore.fetchRecordings();
|
||||
}, []);
|
||||
|
||||
React.useEffect(() => {
|
||||
setRecordings(filterList(recordings, recordsSearch, ['createdBy', 'name']));
|
||||
}, [recordsSearch]);
|
||||
|
||||
const list = recordsSearch !== '' ? shownRecordings : recordings;
|
||||
const length = list.length;
|
||||
|
||||
return (
|
||||
<NoContent
|
||||
show={length === 0}
|
||||
title={
|
||||
<div className="flex flex-col items-center justify-center">
|
||||
<Icon name="no-recordings" size={80} color="figmaColors-accent-secondary" />
|
||||
<div className="text-center text-gray-600 my-4">
|
||||
{recordsSearch !== ''
|
||||
? 'No matching results'
|
||||
: "No recordings available yet."}
|
||||
</div>
|
||||
</div>
|
||||
}
|
||||
>
|
||||
<div className="mt-3 border-b">
|
||||
<div className="grid grid-cols-12 py-2 font-medium px-6">
|
||||
<div className="col-span-8">Name</div>
|
||||
<div className="col-span-4">Last Modified</div>
|
||||
</div>
|
||||
|
||||
{sliceListPerPage(list, recordingsStore.page - 1, recordingsStore.pageSize).map(
|
||||
(record: any) => (
|
||||
<React.Fragment key={record.recordId}>
|
||||
<RecordsListItem record={record} />
|
||||
</React.Fragment>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="w-full flex items-center justify-between pt-4 px-6">
|
||||
<div className="text-disabled-text">
|
||||
Showing{' '}
|
||||
<span className="font-semibold">{Math.min(list.length, recordingsStore.pageSize)}</span>{' '}
|
||||
out of <span className="font-semibold">{list.length}</span> Recordings
|
||||
</div>
|
||||
<Pagination
|
||||
page={recordingsStore.page}
|
||||
totalPages={Math.ceil(length / recordingsStore.pageSize)}
|
||||
onPageChange={(page) => recordingsStore.updatePage(page)}
|
||||
limit={recordingsStore.pageSize}
|
||||
debounceRequest={100}
|
||||
/>
|
||||
</div>
|
||||
</NoContent>
|
||||
);
|
||||
}
|
||||
|
||||
export default observer(RecordingsList);
|
||||
Some files were not shown because too many files have changed in this diff