diff --git a/.github/workflows/alerts-ee.yaml b/.github/workflows/alerts-ee.yaml index 1667ff22b..10482a7cb 100644 --- a/.github/workflows/alerts-ee.yaml +++ b/.github/workflows/alerts-ee.yaml @@ -10,8 +10,21 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/routers" + - "!api/app.py" + - "!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-crons.txt" + - "!ee/api/.gitignore" + - "!ee/api/routers" + - "!ee/api/app.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements.txt" + - "!ee/api/requirements-crons.txt" + name: Build and Deploy Alerts EE @@ -21,115 +34,115 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 - - name: Docker login - run: | - docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext - # Caching docker images - - uses: satackey/action-docker-layer-caching@v0.0.11 - # Ignore the failure of a step and avoid terminating the job. - continue-on-error: true + # Caching docker images + - uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. 
+ continue-on-error: true - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee - ENVIRONMENT: staging - run: | - skip_security_checks=${{ github.event.inputs.skip_security_checks }} - cd api - PUSH_IMAGE=0 bash -x ./build_alerts.sh ee - [[ "x$skip_security_checks" == "xtrue" ]] || { - curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd api + PUSH_IMAGE=0 bash -x ./build_alerts.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("alerts") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? 
+ [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } images=("alerts") for image in ${images[*]};do - ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We've to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF done - err_code=$? - [[ $err_code -ne 0 ]] && { - exit $err_code - } - } && { - echo "Skipping Security Checks" - } - images=("alerts") - for image in ${images[*]};do - docker push $DOCKER_REPO/$image:$IMAGE_TAG - done - - name: Creating old image input - run: | - # - # Create yaml with existing image tags - # - kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ - tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt - echo > /tmp/image_override.yaml + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml 
+ sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that. + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging - for line in `cat /tmp/image_tag.txt`; - do - image_array=($(echo "$line" | tr ':' '\n')) - cat <> /tmp/image_override.yaml - ${image_array[0]}: - image: - # We've to strip off the -ee, as helm will append it. 
- tag: `echo ${image_array[1]} | cut -d '-' -f 1` - EOF - done - - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - - ## Update secerts - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml - sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml - sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml - sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml - sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml - sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml - - # Update changed image tag - sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml - - cat /tmp/image_override.yaml - # Deploy command - mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp - rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ - helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - # We're not passing -ee flag, because helm will add that. 
- IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - - name: Alert slack - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_CHANNEL: ee - SLACK_TITLE: "Failed ${{ github.workflow }}" - SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' - SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} - SLACK_USERNAME: "OR Bot" - SLACK_MESSAGE: 'Build failed :bomb:' + - name: Alert slack + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_CHANNEL: ee + SLACK_TITLE: "Failed ${{ github.workflow }}" + SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' + SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} + SLACK_USERNAME: "OR Bot" + SLACK_MESSAGE: 'Build failed :bomb:' # - name: Debug Job # # if: ${{ failure() }} diff --git a/.github/workflows/alerts.yaml b/.github/workflows/alerts.yaml index 85d25f498..539cc5e65 100644 --- a/.github/workflows/alerts.yaml +++ b/.github/workflows/alerts.yaml @@ -10,7 +10,13 @@ on: branches: - api-v1.10.0 paths: - - api/** + - "api/**" + - "!api/.gitignore" + - "!api/routers" + - "!api/app.py" + - "!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-crons.txt" name: Build and Deploy Alerts @@ -20,112 +26,112 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. 
+ fetch-depth: 2 - - name: Docker login - run: | - docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext - # Caching docker images - - uses: satackey/action-docker-layer-caching@v0.0.11 - # Ignore the failure of a step and avoid terminating the job. - continue-on-error: true + # Caching docker images + - uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. + continue-on-error: true - - name: Building and Pushing Alerts image - id: build-image - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - run: | - skip_security_checks=${{ github.event.inputs.skip_security_checks }} - cd api - PUSH_IMAGE=0 bash -x ./build_alerts.sh - [[ "x$skip_security_checks" == "xtrue" ]] || { - curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + - name: Building and Pushing Alerts image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd api + PUSH_IMAGE=0 bash -x ./build_alerts.sh + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + 
images=("alerts") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } images=("alerts") for image in ${images[*]};do - ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + tag: ${image_array[1]} + EOF done - err_code=$? 
- [[ $err_code -ne 0 ]] && { - exit $err_code - } - } && { - echo "Skipping Security Checks" - } - images=("alerts") - for image in ${images[*]};do - docker push $DOCKER_REPO/$image:$IMAGE_TAG - done - - name: Creating old image input - run: | - # - # Create yaml with existing image tags - # - kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ - tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt - echo > /tmp/image_override.yaml + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f - + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging - for line in `cat /tmp/image_tag.txt`; - do - image_array=($(echo "$line" | tr ':' '\n')) - cat <> 
/tmp/image_override.yaml - ${image_array[0]}: - image: - tag: ${image_array[1]} - EOF - done - - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - - ## Update secerts - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml - sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml - sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml - sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml - sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml - - # Update changed image tag - sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml - - cat /tmp/image_override.yaml - # Deploy command - mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp - rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ - helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f - - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - - name: Alert slack - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_CHANNEL: foss - SLACK_TITLE: "Failed ${{ github.workflow }}" - SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' - SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} - SLACK_USERNAME: "OR Bot" - SLACK_MESSAGE: 'Build failed :bomb:' + - name: Alert slack + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_CHANNEL: foss + SLACK_TITLE: "Failed ${{ github.workflow }}" + SLACK_COLOR: 
${{ job.status }} # or a specific color like 'good' or '#ff00ff' + SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} + SLACK_USERNAME: "OR Bot" + SLACK_MESSAGE: 'Build failed :bomb:' # - name: Debug Job # if: ${{ failure() }} diff --git a/.github/workflows/api-ee.yaml b/.github/workflows/api-ee.yaml index 1405f6e81..b2a31f276 100644 --- a/.github/workflows/api-ee.yaml +++ b/.github/workflows/api-ee.yaml @@ -10,8 +10,18 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements-*.txt" + - "!ee/api/.gitignore" + - "!ee/api/app_alerts.py" + - "!ee/api/app_crons.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements-*.txt" + name: Build and Deploy Chalice EE diff --git a/.github/workflows/api.yaml b/.github/workflows/api.yaml index 91d6c45a1..26d59ff87 100644 --- a/.github/workflows/api.yaml +++ b/.github/workflows/api.yaml @@ -8,9 +8,13 @@ on: default: 'false' push: branches: - - dev + - api-v1.10.0 paths: - - api/** + - "api/**" + - "!api/.gitignore" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements-*.txt" name: Build and Deploy Chalice diff --git a/.github/workflows/assist-ee.yaml b/.github/workflows/assist-ee.yaml new file mode 100644 index 000000000..fc237d371 --- /dev/null +++ b/.github/workflows/assist-ee.yaml @@ -0,0 +1,120 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/utilities/**" + - "utilities/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist EE + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. 
+ fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We've to strip off the -ee, as helm will append it. 
+ tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that. 
+ IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/assist.yaml b/.github/workflows/assist.yaml new file mode 100644 index 000000000..65ca0348c --- /dev/null +++ b/.github/workflows/assist.yaml @@ -0,0 +1,119 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "utilities/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? 
+ [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We've to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app 
openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that. + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/crons-ee.yaml b/.github/workflows/crons-ee.yaml index 0792edd9a..762dae33e 100644 --- a/.github/workflows/crons-ee.yaml +++ b/.github/workflows/crons-ee.yaml @@ -10,8 +10,20 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/app.py" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-alerts.txt" + - "!ee/api/.gitignore" + - "!ee/api/app.py" + - "!ee/api/app_alerts.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements.txt" + - "!ee/api/requirements-crons.txt" name: Build and Deploy Crons EE @@ -110,9 +122,9 @@ jobs: cat /tmp/image_override.yaml # Deploy command - mv openreplay/charts/{ingress-nginx,crons,quickwit} /tmp + mv openreplay/charts/{ingress-nginx,utilities,quickwit} /tmp rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,crons,quickwit} openreplay/charts/ + mv /tmp/{ingress-nginx,utilities,quickwit} openreplay/charts/ helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - env: DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} diff --git a/.github/workflows/peers-ee.yaml b/.github/workflows/peers-ee.yaml new file mode 100644 index 000000000..5db7436da --- /dev/null +++ b/.github/workflows/peers-ee.yaml @@ -0,0 +1,69 @@ 
+# This action will push the peers changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/peers/**" + - "peers/**" + - "!peers/.gitignore" + - "!peers/*-dev.sh" + +name: Build and Deploy Peers + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + cd peers + PUSH_IMAGE=1 bash build.sh ee + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml + sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml + sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml + sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml + sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml + bash kube-install.sh --app peers + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git 
a/.github/workflows/utilities.yaml b/.github/workflows/peers.yaml similarity index 90% rename from .github/workflows/utilities.yaml rename to .github/workflows/peers.yaml index 4a4fad5d3..7b2a715d8 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/peers.yaml @@ -1,13 +1,15 @@ -# This action will push the utilities changes to aws +# This action will push the peers changes to aws on: workflow_dispatch: push: branches: - dev paths: - - utilities/** + - "peers/**" + - "!peers/.gitignore" + - "!peers/*-dev.sh" -name: Build and Deploy Utilities +name: Build and Deploy Peers jobs: deploy: @@ -39,7 +41,7 @@ jobs: IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} ENVIRONMENT: staging run: | - cd utilities + cd peers PUSH_IMAGE=1 bash build.sh - name: Deploy to kubernetes run: | @@ -50,7 +52,7 @@ jobs: sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities + bash kube-install.sh --app peers env: DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} diff --git a/.github/workflows/sourcemaps-reader.yaml b/.github/workflows/sourcemaps-reader.yaml index 2d8aed9c2..095a70784 100644 --- a/.github/workflows/sourcemaps-reader.yaml +++ b/.github/workflows/sourcemaps-reader.yaml @@ -5,7 +5,9 @@ on: branches: - dev paths: - - sourcemap-reader/** + - "sourcemap-reader/**" + - "!sourcemap-reader/.gitignore" + - "!sourcemap-reader/*-dev.sh" name: Build and Deploy sourcemap-reader diff --git a/README.md b/README.md index 363c64d1c..05608a3c1 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket. 
- **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more. -- **Low footprint**. With a ~18KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. +- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. - **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data. - **Privacy controls**. Fine-grained security features for sanitizing user data. - **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean). diff --git a/api/app_alerts.py b/api/app_alerts.py index 7107423de..111bad2a1 100644 --- a/api/app_alerts.py +++ b/api/app_alerts.py @@ -53,3 +53,10 @@ async def stop_server(): await shutdown() import os, signal os.kill(1, signal.SIGTERM) + + +if config("LOCAL_DEV", default=False, cast=bool): + @app.get('/private/trigger', tags=["private"]) + async def trigger_main_cron(): + logging.info("Triggering main cron") + alerts_processor.process() diff --git a/api/chalicelib/core/alerts_processor.py b/api/chalicelib/core/alerts_processor.py index 8049b2f39..4babe64ce 100644 --- a/api/chalicelib/core/alerts_processor.py +++ b/api/chalicelib/core/alerts_processor.py @@ -49,10 +49,12 @@ LeftToDb = { schemas.AlertColumn.errors__4xx_5xx__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", "condition": "status/100!=2"}, - schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=4"}, - schemas.AlertColumn.errors__5xx__count: 
{"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__4xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=5"}, schemas.AlertColumn.errors__javascript__impacted_sessions__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, @@ -95,7 +97,7 @@ def can_check(a) -> bool: a["options"].get("lastNotification") is None or a["options"]["lastNotification"] <= 0 or ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ - and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 def Build(a): @@ -119,7 +121,7 @@ def Build(a): subQ = f"""SELECT {colDef["formula"]} AS value FROM {colDef["table"]} WHERE project_id = %(project_id)s - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" j_s = colDef.get("joinSessions", True) main_table = colDef["table"] is_ss = main_table == "public.sessions" @@ -142,8 +144,7 @@ def Build(a): "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} else: - sub1 = f"""{subQ} AND timestamp>=%(startDate)s - AND timestamp<=%(now)s + sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""} {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}""" 
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""} @@ -206,7 +207,7 @@ def process(): cur = cur.recreate(rollback=True) if len(notifications) > 0: cur.execute( - cur.mogrify(f"""UPDATE public.Alerts + cur.mogrify(f"""UPDATE public.alerts SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) if len(notifications) > 0: diff --git a/api/chalicelib/core/dashboards.py b/api/chalicelib/core/dashboards.py index 93c2b8675..89f56176b 100644 --- a/api/chalicelib/core/dashboards.py +++ b/api/chalicelib/core/dashboards.py @@ -114,17 +114,19 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo row = cur.fetchone() offset = row["count"] pg_query = f"""UPDATE dashboards - SET name = %(name)s, + SET name = %(name)s, description= %(description)s {", is_public = %(is_public)s" if data.is_public is not None else ""} {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""} - WHERE dashboards.project_id = %(projectId)s + WHERE dashboards.project_id = %(projectId)s AND dashboard_id = %(dashboard_id)s - AND (dashboards.user_id = %(userId)s OR is_public)""" + AND (dashboards.user_id = %(userId)s OR is_public) + RETURNING dashboard_id,name,description,is_public,created_at;""" if data.metrics is not None and len(data.metrics) > 0: pg_query = f"""WITH dash AS ({pg_query}) - INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config) - VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};""" + INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config) + VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config 
FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])} + RETURNING dash.*;""" for i, m in enumerate(data.metrics): params[f"metric_id_{i}"] = m # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \ @@ -134,8 +136,10 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo params[f"config_{i}"] = json.dumps({"position": i + offset}) cur.execute(cur.mogrify(pg_query, params)) - - return {"success": True} + row = cur.fetchone() + if row: + row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + return helper.dict_to_camel_case(row) def get_widget(project_id, user_id, dashboard_id, widget_id): diff --git a/api/chalicelib/core/metadata.py b/api/chalicelib/core/metadata.py index 7b426cddb..eba0d7a22 100644 --- a/api/chalicelib/core/metadata.py +++ b/api/chalicelib/core/metadata.py @@ -19,13 +19,13 @@ def __exists_by_name(project_id: int, name: str, exclude_index: Optional[int]) - constraints = column_names() if exclude_index: del constraints[exclude_index - 1] - for c in constraints: - c += " ILIKE %(name)s" + for i in range(len(constraints)): + constraints[i] += " ILIKE %(name)s" query = cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.projects - WHERE project_id = %(project_id)s - AND deleted_at ISNULL - AND ({" OR ".join(constraints)})) AS exists;""", + FROM public.projects + WHERE project_id = %(project_id)s + AND deleted_at ISNULL + AND ({" OR ".join(constraints)})) AS exists;""", {"project_id": project_id, "name": name}) cur.execute(query=query) row = cur.fetchone() diff --git a/api/chalicelib/core/projects.py b/api/chalicelib/core/projects.py index 278a4593d..edecd5eba 100644 --- a/api/chalicelib/core/projects.py +++ b/api/chalicelib/core/projects.py @@ -13,10 +13,10 @@ from chalicelib.utils.TimeUTC import TimeUTC def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool: with pg_client.PostgresClient() as cur: query = 
cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.projects - WHERE deleted_at IS NULL - AND name ILIKE %(name)s - {"AND project_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""", + FROM public.projects + WHERE deleted_at IS NULL + AND name ILIKE %(name)s + {"AND project_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", {"name": name, "exclude_id": exclude_id}) cur.execute(query=query) diff --git a/api/chalicelib/core/sessions.py b/api/chalicelib/core/sessions.py index 512c05769..c95bed903 100644 --- a/api/chalicelib/core/sessions.py +++ b/api/chalicelib/core/sessions.py @@ -301,7 +301,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d extra_col = "" extra_where = "" pre_query = "" - distinct_on="s.session_id" + distinct_on = "s.session_id" if metric_of == schemas.MetricOfTable.user_country: main_col = "user_country" elif metric_of == schemas.MetricOfTable.user_device: @@ -321,7 +321,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d elif metric_of == schemas.MetricOfTable.visited_url: main_col = "path" extra_col = ", path" - distinct_on+=",path" + distinct_on += ",path" main_query = cur.mogrify(f"""{pre_query} SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values FROM (SELECT {main_col} AS name, @@ -1194,8 +1194,9 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): with pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") - return row.get("count", 0) + cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + row = cur.fetchone() + return row.get("count", 0) if row else 0 def session_exists(project_id, session_id): @@ -1203,7 +1204,8 @@ def session_exists(project_id, session_id): query = cur.mogrify("""SELECT 1 FROM public.sessions WHERE session_id=%(session_id)s - AND 
project_id=%(project_id)s""", + AND project_id=%(project_id)s + LIMIT 1;""", {"project_id": project_id, "session_id": session_id}) cur.execute(query) row = cur.fetchone() diff --git a/api/chalicelib/core/users.py b/api/chalicelib/core/users.py index e5ae6e72b..c4933f92c 100644 --- a/api/chalicelib/core/users.py +++ b/api/chalicelib/core/users.py @@ -514,14 +514,6 @@ def set_password_invitation(user_id, new_password): } -def count_members(): - with pg_client.PostgresClient() as cur: - cur.execute("""SELECT COUNT(user_id) - FROM public.users WHERE deleted_at IS NULL;""") - r = cur.fetchone() - return r["count"] - - def email_exists(email): with pg_client.PostgresClient() as cur: cur.execute( diff --git a/api/chalicelib/core/webhook.py b/api/chalicelib/core/webhook.py index 8ce166b03..d0ed97d08 100644 --- a/api/chalicelib/core/webhook.py +++ b/api/chalicelib/core/webhook.py @@ -110,11 +110,11 @@ def exists_by_name(name: str, exclude_id: Optional[int], webhook_type: str = sch tenant_id: Optional[int] = None) -> bool: with pg_client.PostgresClient() as cur: query = cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.webhooks - WHERE name ILIKE %(name)s - AND deleted_at ISNULL - AND type=%(webhook_type)s - {"AND webhook_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""", + FROM public.webhooks + WHERE name ILIKE %(name)s + AND deleted_at ISNULL + AND type=%(webhook_type)s + {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", {"name": name, "exclude_id": exclude_id, "webhook_type": webhook_type}) cur.execute(query) row = cur.fetchone() diff --git a/api/chalicelib/utils/helper.py b/api/chalicelib/utils/helper.py index 85e34ec80..369aff40a 100644 --- a/api/chalicelib/utils/helper.py +++ b/api/chalicelib/utils/helper.py @@ -283,6 +283,7 @@ def custom_alert_to_front(values): # to support frontend format for payload if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom: values["query"]["left"] = 
values["seriesId"] + values["seriesId"] = None return values diff --git a/api/chalicelib/utils/s3.py b/api/chalicelib/utils/s3.py index c2e5b58c7..366a5d181 100644 --- a/api/chalicelib/utils/s3.py +++ b/api/chalicelib/utils/s3.py @@ -55,7 +55,7 @@ def get_presigned_url_for_sharing(bucket, expires_in, key, check_exists=False): ) -def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args): +def get_presigned_url_for_upload(bucket, expires_in, key, **args): return client.generate_presigned_url( 'put_object', Params={ @@ -66,10 +66,7 @@ def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args): ) - - - -def get_presigned_url_for_upload(bucket, expires_in, key, conditions=None, public=False, content_type=None): +def get_presigned_url_for_upload_secure(bucket, expires_in, key, conditions=None, public=False, content_type=None): acl = 'private' if public: acl = 'public-read' diff --git a/api/run-alerts-dev.sh b/api/run-alerts-dev.sh new file mode 100755 index 000000000..54db30171 --- /dev/null +++ b/api/run-alerts-dev.sh @@ -0,0 +1,3 @@ +#!/bin/zsh + +uvicorn app_alerts:app --reload \ No newline at end of file diff --git a/api/schemas.py b/api/schemas.py index ab057426a..5cae3a31a 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -363,7 +363,8 @@ class AlertSchema(BaseModel): @root_validator(pre=True) def transform_alert(cls, values): - if values.get("seriesId") is None and isinstance(values["query"]["left"], int): + values["seriesId"] = None + if isinstance(values["query"]["left"], int): values["seriesId"] = values["query"]["left"] values["query"]["left"] = AlertColumn.custom diff --git a/backend/cmd/assets/main.go b/backend/cmd/assets/main.go index b41dedd87..b05ecbe52 100644 --- a/backend/cmd/assets/main.go +++ b/backend/cmd/assets/main.go @@ -1,9 +1,7 @@ package main import ( - "context" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -13,12 +11,16 @@ import ( "openreplay/backend/internal/assets/cacher" config 
"openreplay/backend/internal/config/assets" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + assetsMetrics "openreplay/backend/pkg/metrics/assets" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("assets") + m := metrics.New() + m.Register(assetsMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,18 +28,13 @@ func main() { pprof.StartProfilingServer() } - cacher := cacher.NewCacher(cfg, metrics) - - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } + cacher := cacher.NewCacher(cfg) msgHandler := func(msg messages.Message) { switch m := msg.(type) { case *messages.AssetCache: cacher.CacheURL(m.SessionID(), m.URL) - totalAssets.Add(context.Background(), 1) + assetsMetrics.IncreaseProcessesSessions() // TODO: connect to "raw" topic in order to listen for JSException case *messages.JSException: sourceList, err := assets.ExtractJSExceptionSources(&m.Payload) diff --git a/backend/cmd/db/main.go b/backend/cmd/db/main.go index f9440a908..84b0d81ed 100644 --- a/backend/cmd/db/main.go +++ b/backend/cmd/db/main.go @@ -3,8 +3,6 @@ package main import ( "errors" "log" - types2 "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -14,16 +12,21 @@ import ( "openreplay/backend/internal/db/datasaver" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + types2 "openreplay/backend/pkg/db/types" "openreplay/backend/pkg/handlers" custom2 "openreplay/backend/pkg/handlers/custom" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/sessions" ) func main() { - metrics := 
monitoring.New("db") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := db.New() @@ -33,7 +36,7 @@ func main() { // Init database pg := cache.NewPGCache( - postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs) + postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs) defer pg.Close() // HandlersFabric returns the list of message handlers we want to be applied to each incoming message. diff --git a/backend/cmd/ender/main.go b/backend/cmd/ender/main.go index 74b0b8bd2..da7ca9b89 100644 --- a/backend/cmd/ender/main.go +++ b/backend/cmd/ender/main.go @@ -2,8 +2,6 @@ package main import ( "log" - "openreplay/backend/internal/storage" - "openreplay/backend/pkg/pprof" "os" "os/signal" "strings" @@ -12,16 +10,23 @@ import ( "openreplay/backend/internal/config/ender" "openreplay/backend/internal/sessionender" + "openreplay/backend/internal/storage" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + enderMetrics "openreplay/backend/pkg/metrics/ender" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("ender") + m := metrics.New() + m.Register(enderMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := ender.New() @@ -29,10 +34,10 @@ func main() { pprof.StartProfilingServer() } - pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs) + pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs) defer pg.Close() - sessions, err := 
sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) + sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) if err != nil { log.Printf("can't init ender service: %s", err) return diff --git a/backend/cmd/http/main.go b/backend/cmd/http/main.go index 4fb82b635..83eedaf29 100644 --- a/backend/cmd/http/main.go +++ b/backend/cmd/http/main.go @@ -2,23 +2,28 @@ package main import ( "log" - "openreplay/backend/internal/config/http" - "openreplay/backend/internal/http/router" - "openreplay/backend/internal/http/server" - "openreplay/backend/internal/http/services" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" + "openreplay/backend/internal/config/http" + "openreplay/backend/internal/http/router" + "openreplay/backend/internal/http/server" + "openreplay/backend/internal/http/services" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + httpMetrics "openreplay/backend/pkg/metrics/http" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("http") + m := metrics.New() + m.Register(httpMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := http.New() @@ -31,14 +36,14 @@ func main() { defer producer.Close(15000) // Connect to database - dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20) + dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20) defer dbConn.Close() // Build all services services := services.New(cfg, producer, dbConn) // Init server's routes - router, err := router.NewRouter(cfg, services, metrics) + router, err := router.NewRouter(cfg, services) if err != nil { log.Fatalf("failed while creating engine: %s", err) } diff --git 
a/backend/cmd/integrations/main.go b/backend/cmd/integrations/main.go index 8c6d56966..3fa07ee9c 100644 --- a/backend/cmd/integrations/main.go +++ b/backend/cmd/integrations/main.go @@ -2,24 +2,26 @@ package main import ( "log" - config "openreplay/backend/internal/config/integrations" - "openreplay/backend/internal/integrations/clientManager" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" - "time" - "os" "os/signal" "syscall" + "time" + config "openreplay/backend/internal/config/integrations" + "openreplay/backend/internal/integrations/clientManager" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/token" ) func main() { - metrics := monitoring.New("integrations") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -27,7 +29,7 @@ func main() { pprof.StartProfilingServer() } - pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics) + pg := postgres.NewConn(cfg.Postgres.String(), 0, 0) defer pg.Close() tokenizer := token.NewTokenizer(cfg.TokenSecret) diff --git a/backend/cmd/sink/main.go b/backend/cmd/sink/main.go index 74e0b1db1..4bbaeeee4 100644 --- a/backend/cmd/sink/main.go +++ b/backend/cmd/sink/main.go @@ -2,10 +2,8 @@ package main import ( "bytes" - "context" "encoding/binary" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -16,13 +14,16 @@ import ( "openreplay/backend/internal/sink/sessionwriter" "openreplay/backend/internal/storage" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + sinkMetrics "openreplay/backend/pkg/metrics/sink" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/url/assets" ) func main() { - metrics := 
monitoring.New("sink") + m := metrics.New() + m.Register(sinkMetrics.List()) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := sink.New() @@ -39,22 +40,8 @@ func main() { producer := queue.NewProducer(cfg.MessageSizeLimit, true) defer producer.Close(cfg.ProducerCloseTimeout) rewriter := assets.NewRewriter(cfg.AssetsOrigin) - assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics) - + assetMessageHandler := assetscache.New(cfg, rewriter, producer) counter := storage.NewLogCounter() - // Session message metrics - totalMessages, err := metrics.RegisterCounter("messages_total") - if err != nil { - log.Printf("can't create messages_total metric: %s", err) - } - savedMessages, err := metrics.RegisterCounter("messages_saved") - if err != nil { - log.Printf("can't create messages_saved metric: %s", err) - } - messageSize, err := metrics.RegisterHistogram("messages_size") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } var ( sessionID uint64 @@ -74,11 +61,12 @@ func main() { if domBuffer.Len() <= 0 && devBuffer.Len() <= 0 { return } + sinkMetrics.RecordWrittenBytes(float64(domBuffer.Len()), "dom") + sinkMetrics.RecordWrittenBytes(float64(devBuffer.Len()), "devtools") // Write buffered batches to the session if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil { log.Printf("writer error: %s", err) - return } // Prepare buffer for the next batch @@ -88,8 +76,7 @@ func main() { return } - // [METRICS] Increase the number of processed messages - totalMessages.Add(context.Background(), 1) + sinkMetrics.IncreaseTotalMessages() // Send SessionEnd trigger to storage service if msg.TypeID() == messages.MsgSessionEnd { @@ -187,9 +174,8 @@ func main() { } } - // [METRICS] Increase the number of written to the files messages and the message size - messageSize.Record(context.Background(), float64(len(msg.Encode()))) - savedMessages.Add(context.Background(), 1) + 
sinkMetrics.IncreaseWrittenMessages() + sinkMetrics.RecordMessageSize(float64(len(msg.Encode()))) } consumer := queue.NewConsumer( diff --git a/backend/cmd/storage/main.go b/backend/cmd/storage/main.go index dcb1b53ed..472324b95 100644 --- a/backend/cmd/storage/main.go +++ b/backend/cmd/storage/main.go @@ -2,7 +2,6 @@ package main import ( "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -12,13 +11,17 @@ import ( "openreplay/backend/internal/storage" "openreplay/backend/pkg/failover" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + storageMetrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" - s3storage "openreplay/backend/pkg/storage" + cloud "openreplay/backend/pkg/storage" ) func main() { - metrics := monitoring.New("storage") + m := metrics.New() + m.Register(storageMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,8 +29,8 @@ func main() { pprof.StartProfilingServer() } - s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket) - srv, err := storage.New(cfg, s3, metrics) + s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket) + srv, err := storage.New(cfg, s3) if err != nil { log.Printf("can't init storage service: %s", err) return diff --git a/backend/go.mod b/backend/go.mod index 0615fb0cb..9633f2b18 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -20,14 +20,11 @@ require ( github.com/klauspost/pgzip v1.2.5 github.com/oschwald/maxminddb-golang v1.7.0 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.12.1 github.com/sethvargo/go-envconfig v0.7.0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - go.opentelemetry.io/otel v1.7.0 - go.opentelemetry.io/otel/exporters/prometheus v0.30.0 - go.opentelemetry.io/otel/metric v0.30.0 - go.opentelemetry.io/otel/sdk/metric v0.30.0 - golang.org/x/net 
v0.0.0-20220906165146-f3363e06e74c + golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 google.golang.org/api v0.81.0 ) @@ -38,8 +35,6 @@ require ( cloud.google.com/go/storage v1.14.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect @@ -55,20 +50,19 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/paulmach/orb v0.7.1 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/stretchr/testify v1.8.0 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/otel/sdk v1.7.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect go.opentelemetry.io/otel/trace v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/sys v0.1.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect diff --git a/backend/go.sum b/backend/go.sum index 5aa3ae3de..676cf479b 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -80,8 +80,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.44.98 h1:fX+NxebSdO/9T6DTNOLhpC+Vv6RNkKRfsMg0a7o/yBo= github.com/aws/aws-sdk-go v1.44.98/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -156,9 +154,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -489,14 +485,6 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0 
h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs= -go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= -go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= -go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= -go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME= -go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8= go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -601,8 +589,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 h1:KrLJ+iz8J6j6VVr/OCfULAcK+xozUmWE43fKpMR4MlI= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -690,7 +678,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -715,8 +702,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -728,8 +715,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/backend/internal/assets/cacher/cacher.go b/backend/internal/assets/cacher/cacher.go index 8bbee092f..4b0353a9a 100644 --- a/backend/internal/assets/cacher/cacher.go +++ b/backend/internal/assets/cacher/cacher.go @@ -1,16 +1,13 @@ package cacher import ( - "context" "crypto/tls" "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "io/ioutil" - "log" "mime" "net/http" - "openreplay/backend/pkg/monitoring" + metrics "openreplay/backend/pkg/metrics/assets" "path/filepath" "strings" "time" @@ -25,30 +22,22 @@ import ( const MAX_CACHE_DEPTH = 5 type cacher struct { - timeoutMap *timeoutMap // Concurrency implemented - s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." - httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." 
- rewriter *assets.Rewriter // Read only - Errors chan error - sizeLimit int - downloadedAssets syncfloat64.Counter - requestHeaders map[string]string - workers *WorkerPool + timeoutMap *timeoutMap // Concurrency implemented + s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." + httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." + rewriter *assets.Rewriter // Read only + Errors chan error + sizeLimit int + requestHeaders map[string]string + workers *WorkerPool } func (c *cacher) CanCache() bool { return c.workers.CanAddTask() } -func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { +func NewCacher(cfg *config.Config) *cacher { rewriter := assets.NewRewriter(cfg.AssetsOrigin) - if metrics == nil { - log.Fatalf("metrics are empty") - } - downloadedAssets, err := metrics.RegisterCounter("assets_downloaded") - if err != nil { - log.Printf("can't create downloaded_assets metric: %s", err) - } c := &cacher{ timeoutMap: newTimeoutMap(), s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets), @@ -59,11 +48,10 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, }, - rewriter: rewriter, - Errors: make(chan error), - sizeLimit: cfg.AssetsSizeLimit, - downloadedAssets: downloadedAssets, - requestHeaders: cfg.AssetsRequestHeaders, + rewriter: rewriter, + Errors: make(chan error), + sizeLimit: cfg.AssetsSizeLimit, + requestHeaders: cfg.AssetsRequestHeaders, } c.workers = NewPool(64, c.CacheFile) return c @@ -75,6 +63,7 @@ func (c *cacher) CacheFile(task *Task) { func (c *cacher) cacheURL(t *Task) { t.retries-- + start := time.Now() req, _ := http.NewRequest("GET", t.requestURL, nil) if t.retries%2 == 0 { req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0") @@ -87,6 +76,7 @@ func (c *cacher) cacheURL(t *Task) { c.Errors <- errors.Wrap(err, t.urlContext) return } + 
metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode) defer res.Body.Close() if res.StatusCode >= 400 { printErr := true @@ -122,12 +112,15 @@ func (c *cacher) cacheURL(t *Task) { } // TODO: implement in streams + start = time.Now() err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false) if err != nil { + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true) c.Errors <- errors.Wrap(err, t.urlContext) return } - c.downloadedAssets.Add(context.Background(), 1) + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false) + metrics.IncreaseSavedSessions() if isCSS { if t.depth > 0 { diff --git a/backend/internal/http/router/handlers-ios.go b/backend/internal/http/router/handlers-ios.go index e0fc73b6f..b11918d54 100644 --- a/backend/internal/http/router/handlers-ios.go +++ b/backend/internal/http/router/handlers-ios.go @@ -22,28 +22,28 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) req := &StartIOSSessionRequest{} if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit) defer body.Close() if err := json.NewDecoder(body).Decode(req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0) return } if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is 
not active")) + ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0) } else { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging } return } @@ -53,18 +53,18 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) if err != nil { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0) return } sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli())) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) return } // TODO: if EXPIRED => send message for two sessions association @@ -94,22 +94,24 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) UserUUID: userUUID, SessionID: strconv.FormatUint(tokenData.ID, 10), BeaconSizeLimit: e.cfg.BeaconSizeLimit, - }) + }, startTime, r.URL.Path, 0) } func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS) } func (e *Router) 
pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil && err != token.EXPIRED { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } // Check timestamps here? @@ -117,16 +119,17 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque } func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() log.Printf("recieved imagerequest") sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { // Should accept expired token? - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit) @@ -134,21 +137,21 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) err = r.ParseMultipartForm(1e6) // ~1Mb if err == http.ErrNotMultipart || err == http.ErrMissingBoundary { - ResponseWithError(w, http.StatusUnsupportedMediaType, err) + ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0) return // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB } else if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging return } if r.MultipartForm == nil { - ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed")) + 
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0) return } if len(r.MultipartForm.Value["projectKey"]) == 0 { - ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter? + ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter? return } diff --git a/backend/internal/http/router/handlers-web.go b/backend/internal/http/router/handlers-web.go index 7afd184e5..52a37b7f0 100644 --- a/backend/internal/http/router/handlers-web.go +++ b/backend/internal/http/router/handlers-web.go @@ -3,18 +3,17 @@ package router import ( "encoding/json" "errors" - "github.com/Masterminds/semver" - "go.opentelemetry.io/otel/attribute" "io" "log" "math/rand" "net/http" - "openreplay/backend/internal/http/uuid" - "openreplay/backend/pkg/flakeid" "strconv" "time" + "github.com/Masterminds/semver" + "openreplay/backend/internal/http/uuid" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/flakeid" . 
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/token" ) @@ -28,13 +27,6 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ( if err != nil { return nil, err } - - reqSize := len(bodyBytes) - e.requestSize.Record( - r.Context(), - float64(reqSize), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) return bodyBytes, nil } @@ -56,40 +48,43 @@ func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { startTime := time.Now() + bodySize := 0 // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &StartSessionRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached")) + ResponseWithError(w, http.StatusNotFound, + errors.New("project doesn't exist or 
capture limit has been reached"), startTime, r.URL.Path, bodySize) } else { log.Printf("can't get project by key: %s", err) - ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key")) + ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize) } return } @@ -99,19 +94,19 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) if err != nil || req.Reset { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } startTimeMili := startTime.UnixMilli() sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili)) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize) return } // TODO: if EXPIRED => send message for two sessions association @@ -163,29 +158,33 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) BeaconSizeLimit: e.getBeaconSize(tokenData.ID), StartTimestamp: int64(flakeid.ExtractTimestamp(tokenData.ID)), Delay: tokenData.Delay, - }) + }, startTime, r.URL.Path, bodySize) } func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check authorization sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, 
startTime, r.URL.Path, bodySize) return } // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID)) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Send processed messages to queue as array of bytes // TODO: check bytes for nonsense crap @@ -194,39 +193,43 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) log.Printf("can't send processed messages to queue: %s", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &NotStartedRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, 
http.StatusForbidden, errors.New("projectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r) @@ -248,5 +251,5 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { log.Printf("Unable to insert Unstarted Session: %v\n", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } diff --git a/backend/internal/http/router/handlers.go b/backend/internal/http/router/handlers.go index c36fdd668..425177341 100644 --- a/backend/internal/http/router/handlers.go +++ b/backend/internal/http/router/handlers.go @@ -6,9 +6,11 @@ import ( "io/ioutil" "log" "net/http" + "time" ) func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) { + start := time.Now() body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit) defer body.Close() @@ -21,7 +23,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID reader, err = gzip.NewReader(body) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response return } //log.Println("Gzip reader init", reader) @@ -32,7 +34,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID //log.Println("Reader after switch:", reader) buf, err := ioutil.ReadAll(reader) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on 
staging + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging return } e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send? diff --git a/backend/internal/http/router/response.go b/backend/internal/http/router/response.go index 0b4725419..b66b7c563 100644 --- a/backend/internal/http/router/response.go +++ b/backend/internal/http/router/response.go @@ -4,21 +4,44 @@ import ( "encoding/json" "log" "net/http" + "time" + + metrics "openreplay/backend/pkg/metrics/http" ) -func ResponseWithJSON(w http.ResponseWriter, res interface{}) { +func recordMetrics(requestStart time.Time, url string, code, bodySize int) { + if bodySize > 0 { + metrics.RecordRequestSize(float64(bodySize), url, code) + } + metrics.IncreaseTotalRequests() + metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code) +} + +func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) { + w.WriteHeader(http.StatusOK) + recordMetrics(requestStart, url, http.StatusOK, bodySize) +} + +func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) { body, err := json.Marshal(res) if err != nil { log.Println(err) } w.Header().Set("Content-Type", "application/json") w.Write(body) + recordMetrics(requestStart, url, http.StatusOK, bodySize) } -func ResponseWithError(w http.ResponseWriter, code int, err error) { - type response struct { - Error string `json:"error"` +type response struct { + Error string `json:"error"` +} + +func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) { + body, err := json.Marshal(&response{err.Error()}) + if err != nil { + log.Println(err) } w.WriteHeader(code) - ResponseWithJSON(w, &response{err.Error()}) + w.Write(body) + recordMetrics(requestStart, url, code, bodySize) } diff --git 
a/backend/internal/http/router/router.go b/backend/internal/http/router/router.go index 964016dfd..6cd7efe79 100644 --- a/backend/internal/http/router/router.go +++ b/backend/internal/http/router/router.go @@ -1,19 +1,16 @@ package router import ( - "context" "fmt" - "github.com/gorilla/mux" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" "net/http" + "sync" + "time" + + "github.com/gorilla/mux" http3 "openreplay/backend/internal/config/http" http2 "openreplay/backend/internal/http/services" "openreplay/backend/internal/http/util" - "openreplay/backend/pkg/monitoring" - "sync" - "time" ) type BeaconSize struct { @@ -25,21 +22,16 @@ type Router struct { router *mux.Router cfg *http3.Config services *http2.ServicesBuilder - requestSize syncfloat64.Histogram - requestDuration syncfloat64.Histogram - totalRequests syncfloat64.Counter mutex *sync.RWMutex beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize } -func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) { +func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case services == nil: return nil, fmt.Errorf("services is empty") - case metrics == nil: - return nil, fmt.Errorf("metrics is empty") } e := &Router{ cfg: cfg, @@ -47,7 +39,6 @@ func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *moni mutex: &sync.RWMutex{}, beaconSizeCache: make(map[uint64]*BeaconSize), } - e.initMetrics(metrics) e.init() go e.clearBeaconSizes() return e, nil @@ -115,22 +106,6 @@ func (e *Router) init() { e.router.Use(e.corsMiddleware) } -func (e *Router) initMetrics(metrics *monitoring.Metrics) { - var err error - e.requestSize, err = metrics.RegisterHistogram("requests_body_size") - if err != nil { - log.Printf("can't create requests_body_size metric: %s", err) - } - 
e.requestDuration, err = metrics.RegisterHistogram("requests_duration") - if err != nil { - log.Printf("can't create requests_duration metric: %s", err) - } - e.totalRequests, err = metrics.RegisterCounter("requests_total") - if err != nil { - log.Printf("can't create requests_total metric: %s", err) - } -} - func (e *Router) root(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } @@ -149,17 +124,8 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler { log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path)) - requestStart := time.Now() - // Serve request next.ServeHTTP(w, r) - - metricsContext, _ := context.WithTimeout(context.Background(), time.Millisecond*100) - e.totalRequests.Add(metricsContext, 1) - e.requestDuration.Record(metricsContext, - float64(time.Now().Sub(requestStart).Milliseconds()), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) }) } diff --git a/backend/internal/sessionender/ender.go b/backend/internal/sessionender/ender.go index c1c2c9b7f..e1ddb0ffe 100644 --- a/backend/internal/sessionender/ender.go +++ b/backend/internal/sessionender/ender.go @@ -1,13 +1,11 @@ package sessionender import ( - "context" - "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" "time" + + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/metrics/ender" ) // EndedSessionHandler handler for ended sessions @@ -23,32 +21,16 @@ type session struct { // SessionEnder updates timestamp of last message for each session type SessionEnder struct { - timeout int64 - sessions map[uint64]*session // map[sessionID]session - timeCtrl *timeController - activeSessions syncfloat64.UpDownCounter - totalSessions syncfloat64.Counter + timeout int64 + sessions map[uint64]*session // map[sessionID]session + timeCtrl *timeController } -func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, 
error) { - if metrics == nil { - return nil, fmt.Errorf("metrics module is empty") - } - activeSessions, err := metrics.RegisterUpDownCounter("sessions_active") - if err != nil { - return nil, fmt.Errorf("can't register session.active metric: %s", err) - } - totalSessions, err := metrics.RegisterCounter("sessions_total") - if err != nil { - return nil, fmt.Errorf("can't register session.total metric: %s", err) - } - +func New(timeout int64, parts int) (*SessionEnder, error) { return &SessionEnder{ - timeout: timeout, - sessions: make(map[uint64]*session), - timeCtrl: NewTimeController(parts), - activeSessions: activeSessions, - totalSessions: totalSessions, + timeout: timeout, + sessions: make(map[uint64]*session), + timeCtrl: NewTimeController(parts), }, nil } @@ -74,8 +56,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) { lastUserTime: msgTimestamp, // last timestamp from user's machine isEnded: false, } - se.activeSessions.Add(context.Background(), 1) - se.totalSessions.Add(context.Background(), 1) + ender.IncreaseActiveSessions() + ender.IncreaseTotalSessions() return } // Keep the highest user's timestamp for correct session duration value @@ -100,7 +82,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) { sess.isEnded = true if handler(sessID, sess.lastUserTime) { delete(se.sessions, sessID) - se.activeSessions.Add(context.Background(), -1) + ender.DecreaseActiveSessions() + ender.IncreaseClosedSessions() removedSessions++ } else { log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime) diff --git a/backend/internal/sink/assetscache/assets.go b/backend/internal/sink/assetscache/assets.go index 4c63f6897..387ee5c92 100644 --- a/backend/internal/sink/assetscache/assets.go +++ b/backend/internal/sink/assetscache/assets.go @@ -1,20 +1,19 @@ package assetscache import ( - "context" "crypto/md5" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "log" "net/url" - 
"openreplay/backend/internal/config/sink" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/queue/types" - "openreplay/backend/pkg/url/assets" + metrics "openreplay/backend/pkg/metrics/sink" "strings" "sync" "time" + + "openreplay/backend/internal/config/sink" + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/queue/types" + "openreplay/backend/pkg/url/assets" ) type CachedAsset struct { @@ -23,52 +22,21 @@ type CachedAsset struct { } type AssetsCache struct { - mutex sync.RWMutex - cfg *sink.Config - rewriter *assets.Rewriter - producer types.Producer - cache map[string]*CachedAsset - blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain - totalAssets syncfloat64.Counter - cachedAssets syncfloat64.Counter - skippedAssets syncfloat64.Counter - assetSize syncfloat64.Histogram - assetDuration syncfloat64.Histogram + mutex sync.RWMutex + cfg *sink.Config + rewriter *assets.Rewriter + producer types.Producer + cache map[string]*CachedAsset + blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain } -func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache { - // Assets metrics - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } - cachedAssets, err := metrics.RegisterCounter("assets_cached") - if err != nil { - log.Printf("can't create assets_cached metric: %s", err) - } - skippedAssets, err := metrics.RegisterCounter("assets_skipped") - if err != nil { - log.Printf("can't create assets_skipped metric: %s", err) - } - assetSize, err := metrics.RegisterHistogram("asset_size") - if err != nil { - log.Printf("can't create asset_size metric: %s", err) - } - assetDuration, err := metrics.RegisterHistogram("asset_duration") - if err != nil { - 
log.Printf("can't create asset_duration metric: %s", err) - } +func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache { assetsCache := &AssetsCache{ - cfg: cfg, - rewriter: rewriter, - producer: producer, - cache: make(map[string]*CachedAsset, 64), - blackList: make([]string, 0), - totalAssets: totalAssets, - cachedAssets: cachedAssets, - skippedAssets: skippedAssets, - assetSize: assetSize, - assetDuration: assetDuration, + cfg: cfg, + rewriter: rewriter, + producer: producer, + cache: make(map[string]*CachedAsset, 64), + blackList: make([]string, 0), } // Parse black list for cache layer if len(cfg.CacheBlackList) > 0 { @@ -84,7 +52,7 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, m } func (e *AssetsCache) cleaner() { - cleanTick := time.Tick(time.Minute * 30) + cleanTick := time.Tick(time.Minute * 3) for { select { case <-cleanTick: @@ -105,6 +73,7 @@ func (e *AssetsCache) clearCache() { if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration { deleted++ delete(e.cache, id) + metrics.DecreaseCachedAssets() } } log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize) @@ -232,8 +201,7 @@ func parseHost(baseURL string) (string, error) { } func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string { - ctx := context.Background() - e.totalAssets.Add(ctx, 1) + metrics.IncreaseTotalAssets() // Try to find asset in cache h := md5.New() // Cut first part of url (scheme + host) @@ -255,7 +223,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st e.mutex.RUnlock() if ok { if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration { - e.skippedAssets.Add(ctx, 1) + metrics.IncreaseSkippedAssets() return cachedAsset.msg } } @@ -267,8 +235,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st start := time.Now() res := e.getRewrittenCSS(sessionID, baseURL, css) duration := 
time.Now().Sub(start).Milliseconds() - e.assetSize.Record(ctx, float64(len(res))) - e.assetDuration.Record(ctx, float64(duration)) + metrics.RecordAssetSize(float64(len(res))) + metrics.RecordProcessAssetDuration(float64(duration)) // Save asset to cache if we spent more than threshold if duration > e.cfg.CacheThreshold { e.mutex.Lock() @@ -277,7 +245,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st ts: time.Now(), } e.mutex.Unlock() - e.cachedAssets.Add(ctx, 1) + metrics.IncreaseCachedAssets() } // Return rewritten asset return res diff --git a/backend/internal/storage/storage.go b/backend/internal/storage/storage.go index fbe9e2228..1e2507163 100644 --- a/backend/internal/storage/storage.go +++ b/backend/internal/storage/storage.go @@ -2,20 +2,20 @@ package storage import ( "bytes" - "context" "fmt" - gzip "github.com/klauspost/pgzip" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - config "openreplay/backend/internal/config/storage" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/storage" "os" "strconv" "strings" "sync" "time" + + config "openreplay/backend/internal/config/storage" + "openreplay/backend/pkg/messages" + metrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/storage" + + gzip "github.com/klauspost/pgzip" ) type FileType string @@ -25,6 +25,13 @@ const ( DEV FileType = "/devtools.mob" ) +func (t FileType) String() string { + if t == DOM { + return "dom" + } + return "devtools" +} + type Task struct { id string doms *bytes.Buffer @@ -36,92 +43,23 @@ type Storage struct { cfg *config.Config s3 *storage.S3 startBytes []byte - - totalSessions syncfloat64.Counter - sessionDOMSize syncfloat64.Histogram - sessionDEVSize syncfloat64.Histogram - readingDOMTime syncfloat64.Histogram - readingDEVTime syncfloat64.Histogram - sortingDOMTime syncfloat64.Histogram - sortingDEVTime syncfloat64.Histogram - archivingDOMTime 
syncfloat64.Histogram - archivingDEVTime syncfloat64.Histogram - uploadingDOMTime syncfloat64.Histogram - uploadingDEVTime syncfloat64.Histogram - - tasks chan *Task - ready chan struct{} + tasks chan *Task + ready chan struct{} } -func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) { +func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case s3 == nil: return nil, fmt.Errorf("s3 storage is empty") } - // Create metrics - totalSessions, err := metrics.RegisterCounter("sessions_total") - if err != nil { - log.Printf("can't create sessions_total metric: %s", err) - } - sessionDOMSize, err := metrics.RegisterHistogram("sessions_size") - if err != nil { - log.Printf("can't create session_size metric: %s", err) - } - sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size") - if err != nil { - log.Printf("can't create sessions_dt_size metric: %s", err) - } - readingDOMTime, err := metrics.RegisterHistogram("reading_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDOMTime, err := metrics.RegisterHistogram("sorting_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDEVTime, err := metrics.RegisterHistogram("sorting_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - uploadingDOMTime, err := 
metrics.RegisterHistogram("uploading_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } - uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } newStorage := &Storage{ - cfg: cfg, - s3: s3, - startBytes: make([]byte, cfg.FileSplitSize), - totalSessions: totalSessions, - sessionDOMSize: sessionDOMSize, - sessionDEVSize: sessionDevtoolsSize, - readingDOMTime: readingDOMTime, - readingDEVTime: readingDEVTime, - sortingDOMTime: sortingDOMTime, - sortingDEVTime: sortingDEVTime, - archivingDOMTime: archivingDOMTime, - archivingDEVTime: archivingDEVTime, - uploadingDOMTime: uploadingDOMTime, - uploadingDEVTime: uploadingDEVTime, - tasks: make(chan *Task, 1), - ready: make(chan struct{}), + cfg: cfg, + s3: s3, + startBytes: make([]byte, cfg.FileSplitSize), + tasks: make(chan *Task, 1), + ready: make(chan struct{}), } go newStorage.worker() return newStorage, nil @@ -187,11 +125,7 @@ func (s *Storage) openSession(filePath string, tp FileType) ([]byte, error) { if err != nil { return nil, fmt.Errorf("can't sort session, err: %s", err) } - if tp == DOM { - s.sortingDOMTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } else { - s.sortingDEVTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } + metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return res, nil } @@ -215,26 +149,19 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { if err != nil { return err } - durRead := time.Now().Sub(startRead).Milliseconds() - // Send metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - if tp == DOM { - s.sessionDOMSize.Record(ctx, float64(len(mob))) - s.readingDOMTime.Record(ctx, float64(durRead)) - } else { - s.sessionDEVSize.Record(ctx, float64(len(mob))) - 
s.readingDEVTime.Record(ctx, float64(durRead)) - } + metrics.RecordSessionSize(float64(len(mob)), tp.String()) + metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String()) + // Encode and compress session if tp == DEV { - startCompress := time.Now() + start := time.Now() task.dev = s.compressSession(mob) - s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) } else { if len(mob) <= s.cfg.FileSplitSize { - startCompress := time.Now() + start := time.Now() task.doms = s.compressSession(mob) - s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return nil } wg := &sync.WaitGroup{} @@ -253,7 +180,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { wg.Done() }() wg.Wait() - s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart)) + metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String()) } return nil } @@ -324,11 +251,9 @@ func (s *Storage) uploadSession(task *Task) { wg.Done() }() wg.Wait() - // Record metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome)) - s.uploadingDEVTime.Record(ctx, float64(uploadDev)) - s.totalSessions.Add(ctx, 1) + metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String()) + metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String()) + metrics.IncreaseStorageTotalSessions() } func (s *Storage) worker() { diff --git a/backend/pkg/db/postgres/batches.go b/backend/pkg/db/postgres/batches.go index c1283da10..8b9f2484d 100644 --- a/backend/pkg/db/postgres/batches.go +++ b/backend/pkg/db/postgres/batches.go @@ -1,14 +1,13 @@ package postgres import ( - "context" - 
"github.com/jackc/pgx/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/monitoring" "strings" "time" + + "openreplay/backend/pkg/metrics/database" + + "github.com/jackc/pgx/v4" ) type batchItem struct { @@ -78,21 +77,17 @@ func NewBatchesTask(size int) *batchesTask { } type BatchSet struct { - c Pool - batches map[uint64]*SessionBatch - batchQueueLimit int - batchSizeLimit int - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter - updates map[uint64]*sessionUpdates - workerTask chan *batchesTask - done chan struct{} - finished chan struct{} + c Pool + batches map[uint64]*SessionBatch + batchQueueLimit int + batchSizeLimit int + updates map[uint64]*sessionUpdates + workerTask chan *batchesTask + done chan struct{} + finished chan struct{} } -func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *BatchSet { +func NewBatchSet(c Pool, queueLimit, sizeLimit int) *BatchSet { bs := &BatchSet{ c: c, batches: make(map[uint64]*SessionBatch), @@ -103,31 +98,10 @@ func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) finished: make(chan struct{}), updates: make(map[uint64]*sessionUpdates), } - bs.initMetrics(metrics) go bs.worker() return bs } -func (conn *BatchSet) initMetrics(metrics *monitoring.Metrics) { - var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = 
metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *BatchSet) getBatch(sessionID uint64) *SessionBatch { sessionID = sessionID % 10 if _, ok := conn.batches[sessionID]; !ok { @@ -194,11 +168,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { // Append session update sql request to the end of batch batch.Prepare() // Record batch size in bytes and number of lines - conn.batchSizeBytes.Record(context.Background(), float64(batch.Size())) - conn.batchSizeLines.Record(context.Background(), float64(batch.Len())) + database.RecordBatchSize(float64(batch.Size())) + database.RecordBatchElements(float64(batch.Len())) start := time.Now() - isFailed := false // Send batch to db and execute br := conn.c.SendBatch(batch.batch) @@ -209,15 +182,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { failedSql := batch.items[i] query := strings.ReplaceAll(failedSql.query, "\n", " ") log.Println("failed sql req:", query, failedSql.arguments) - isFailed = true } } br.Close() // returns err - dur := time.Now().Sub(start).Milliseconds() - conn.sqlRequestTime.Record(context.Background(), float64(dur), - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) - conn.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) + database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) } } diff --git a/backend/pkg/db/postgres/bulk.go b/backend/pkg/db/postgres/bulk.go index 8c6c42f78..b6a2ddd35 100644 --- a/backend/pkg/db/postgres/bulk.go +++ b/backend/pkg/db/postgres/bulk.go @@ -2,13 +2,9 @@ package postgres import ( "bytes" - "context" "errors" "fmt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "log" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics/database" "time" ) @@ -25,15 +21,13 @@ type Bulk interface { 
} type bulkImpl struct { - conn Pool - table string - columns string - template string - setSize int - sizeLimit int - values []interface{} - bulkSize syncfloat64.Histogram - bulkDuration syncfloat64.Histogram + conn Pool + table string + columns string + template string + setSize int + sizeLimit int + values []interface{} } func (b *bulkImpl) Append(args ...interface{}) error { @@ -79,18 +73,15 @@ func (b *bulkImpl) send() error { return fmt.Errorf("send bulk err: %s", err) } // Save bulk metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table)) - b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table)) + database.RecordBulkElements(float64(size), "pg", b.table) + database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table) return nil } -func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { +func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { switch { case conn == nil: return nil, errors.New("db conn is empty") - case metrics == nil: - return nil, errors.New("metrics is empty") case table == "": return nil, errors.New("table is empty") case columns == "": @@ -102,23 +93,13 @@ func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template st case sizeLimit <= 0: return nil, errors.New("size limit is wrong") } - messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } - bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } return &bulkImpl{ - conn: conn, - table: table, - columns: columns, - template: template, - setSize: setSize, - sizeLimit: sizeLimit, - 
values: make([]interface{}, 0, setSize*sizeLimit), - bulkSize: messagesInBulk, - bulkDuration: bulkInsertDuration, + conn: conn, + table: table, + columns: columns, + template: template, + setSize: setSize, + sizeLimit: sizeLimit, + values: make([]interface{}, 0, setSize*sizeLimit), }, nil } diff --git a/backend/pkg/db/postgres/bulks.go b/backend/pkg/db/postgres/bulks.go index 5774ba184..f3e9e95c9 100644 --- a/backend/pkg/db/postgres/bulks.go +++ b/backend/pkg/db/postgres/bulks.go @@ -2,7 +2,6 @@ package postgres import ( "log" - "openreplay/backend/pkg/monitoring" "time" ) @@ -30,16 +29,14 @@ type BulkSet struct { webCustomEvents Bulk webClickEvents Bulk webNetworkRequest Bulk - metrics *monitoring.Metrics workerTask chan *bulksTask done chan struct{} finished chan struct{} } -func NewBulkSet(c Pool, metrics *monitoring.Metrics) *BulkSet { +func NewBulkSet(c Pool) *BulkSet { bs := &BulkSet{ c: c, - metrics: metrics, workerTask: make(chan *bulksTask, 1), done: make(chan struct{}), finished: make(chan struct{}), @@ -86,7 +83,7 @@ func (conn *BulkSet) Get(name string) Bulk { func (conn *BulkSet) initBulks() { var err error - conn.autocompletes, err = NewBulk(conn.c, conn.metrics, + conn.autocompletes, err = NewBulk(conn.c, "autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", @@ -94,7 +91,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create autocomplete bulk: %s", err) } - conn.requests, err = NewBulk(conn.c, conn.metrics, + conn.requests, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, duration, success)", "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)", @@ -102,7 +99,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create requests bulk: %s", err) } - conn.customEvents, err = NewBulk(conn.c, conn.metrics, + conn.customEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, timestamp, seq_index, name, payload)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d)", @@ 
-110,7 +107,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create customEvents bulk: %s", err) } - conn.webPageEvents, err = NewBulk(conn.c, conn.metrics, + conn.webPageEvents, err = NewBulk(conn.c, "events.pages", "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+ "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+ @@ -122,7 +119,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webInputEvents, err = NewBulk(conn.c, conn.metrics, + conn.webInputEvents, err = NewBulk(conn.c, "events.inputs", "(session_id, message_id, timestamp, value, label)", "($%d, $%d, $%d, LEFT($%d, 2000), NULLIF(LEFT($%d, 2000),''))", @@ -130,7 +127,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webGraphQL, err = NewBulk(conn.c, conn.metrics, + conn.webGraphQL, err = NewBulk(conn.c, "events.graphql", "(session_id, timestamp, message_id, name, request_body, response_body)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -138,7 +135,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webErrors, err = NewBulk(conn.c, conn.metrics, + conn.webErrors, err = NewBulk(conn.c, "errors", "(error_id, project_id, source, name, message, payload)", "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)", @@ -146,7 +143,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrors bulk: %s", err) } - conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics, + conn.webErrorEvents, err = NewBulk(conn.c, "events.errors", "(session_id, message_id, timestamp, error_id)", "($%d, $%d, $%d, $%d)", @@ -154,7 +151,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorEvents bulk: %s", err) } - conn.webErrorTags, err = 
NewBulk(conn.c, conn.metrics, + conn.webErrorTags, err = NewBulk(conn.c, "public.errors_tags", "(session_id, message_id, error_id, key, value)", "($%d, $%d, $%d, $%d, $%d)", @@ -162,7 +159,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorEvents bulk: %s", err) } - conn.webIssues, err = NewBulk(conn.c, conn.metrics, + conn.webIssues, err = NewBulk(conn.c, "issues", "(project_id, issue_id, type, context_string)", "($%d, $%d, $%d, $%d)", @@ -170,7 +167,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssues bulk: %s", err) } - conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics, + conn.webIssueEvents, err = NewBulk(conn.c, "events_common.issues", "(session_id, issue_id, timestamp, seq_index, payload)", "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))", @@ -178,7 +175,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssueEvents bulk: %s", err) } - conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics, + conn.webCustomEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, seq_index, timestamp, name, payload, level)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -186,7 +183,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webCustomEvents bulk: %s", err) } - conn.webClickEvents, err = NewBulk(conn.c, conn.metrics, + conn.webClickEvents, err = NewBulk(conn.c, "events.clicks", "(session_id, message_id, timestamp, label, selector, url, path)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000))", @@ -194,7 +191,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webClickEvents bulk: %s", err) } - conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics, + conn.webNetworkRequest, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, 
success)", "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)", diff --git a/backend/pkg/db/postgres/connector.go b/backend/pkg/db/postgres/connector.go index 2e8f3d425..6904dc135 100644 --- a/backend/pkg/db/postgres/connector.go +++ b/backend/pkg/db/postgres/connector.go @@ -2,11 +2,10 @@ package postgres import ( "context" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" + + "github.com/jackc/pgx/v4/pgxpool" "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/monitoring" ) type CH interface { @@ -15,36 +14,28 @@ type CH interface { // Conn contains batches, bulks and cache for all sessions type Conn struct { - c Pool - batches *BatchSet - bulks *BulkSet - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter - chConn CH + c Pool + batches *BatchSet + bulks *BulkSet + chConn CH } func (conn *Conn) SetClickHouse(ch CH) { conn.chConn = ch } -func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn { - if metrics == nil { - log.Fatalf("metrics is nil") - } +func NewConn(url string, queueLimit, sizeLimit int) *Conn { c, err := pgxpool.Connect(context.Background(), url) if err != nil { log.Fatalf("pgxpool.Connect err: %s", err) } conn := &Conn{} - conn.initMetrics(metrics) - conn.c, err = NewPool(c, conn.sqlRequestTime, conn.sqlRequestCounter) + conn.c, err = NewPool(c) if err != nil { log.Fatalf("can't create new pool wrapper: %s", err) } - conn.bulks = NewBulkSet(conn.c, metrics) - conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit, metrics) + conn.bulks = NewBulkSet(conn.c) + conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit) return conn } @@ -55,26 +46,6 @@ func (conn *Conn) Close() error { return nil } -func (conn *Conn) initMetrics(metrics *monitoring.Metrics) { - 
var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) { if len(value) == 0 { return diff --git a/backend/pkg/db/postgres/pool.go b/backend/pkg/db/postgres/pool.go index 5f9cbaa29..5214be8d0 100644 --- a/backend/pkg/db/postgres/pool.go +++ b/backend/pkg/db/postgres/pool.go @@ -3,12 +3,12 @@ package postgres import ( "context" "errors" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "strings" "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "openreplay/backend/pkg/metrics/database" ) // Pool is a pgx.Pool wrapper with metrics integration @@ -22,19 +22,15 @@ type Pool interface { } type poolImpl struct { - conn *pgxpool.Pool - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter + conn *pgxpool.Pool } func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) { start := time.Now() res, err := p.conn.Query(getTimeoutContext(), sql, args...) 
method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res, err } @@ -42,10 +38,8 @@ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row { start := time.Now() res := p.conn.QueryRow(getTimeoutContext(), sql, args...) method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res } @@ -53,45 +47,37 @@ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error { start := time.Now() _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...) 
method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults { start := time.Now() res := p.conn.SendBatch(getTimeoutContext(), b) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "sendBatch")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "sendBatch")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "") + database.IncreaseTotalRequests("sendBatch", "") return res } func (p *poolImpl) Begin() (*_Tx, error) { start := time.Now() tx, err := p.conn.Begin(context.Background()) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "begin")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "begin")) - return &_Tx{tx, p.sqlRequestTime, p.sqlRequestCounter}, err + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "") + database.IncreaseTotalRequests("begin", "") + return &_Tx{tx}, err } func (p *poolImpl) Close() { p.conn.Close() } -func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlRequestCounter syncfloat64.Counter) (Pool, error) { +func NewPool(conn *pgxpool.Pool) (Pool, error) { if conn == nil { return nil, errors.New("conn is empty") } return &poolImpl{ - conn: conn, - sqlRequestTime: sqlRequestTime, - sqlRequestCounter: sqlRequestCounter, + conn: conn, }, nil } @@ -99,38 +85,30 @@ func 
NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlReques type _Tx struct { pgx.Tx - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter } func (tx *_Tx) exec(sql string, args ...interface{}) error { start := time.Now() _, err := tx.Exec(context.Background(), sql, args...) method, table := methodName(sql) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (tx *_Tx) rollback() error { start := time.Now() err := tx.Rollback(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "rollback")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "rollback")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "") + database.IncreaseTotalRequests("rollback", "") return err } func (tx *_Tx) commit() error { start := time.Now() err := tx.Commit(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "commit")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "commit")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "") + database.IncreaseTotalRequests("commit", "") return err } @@ -169,7 +147,8 @@ func methodName(sql string) (string, string) { case "update": table = strings.TrimSpace(parts[1]) case "insert": - table = strings.TrimSpace(parts[2]) + tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(") + 
table = tableNameParts[0] } return cmd, table } diff --git a/backend/pkg/messages/iterator-sink.go b/backend/pkg/messages/iterator-sink.go index a5897c3b7..be12b63eb 100644 --- a/backend/pkg/messages/iterator-sink.go +++ b/backend/pkg/messages/iterator-sink.go @@ -3,6 +3,7 @@ package messages import ( "fmt" "log" + "openreplay/backend/pkg/metrics/sink" ) type sinkMessageIteratorImpl struct { @@ -53,6 +54,8 @@ func (i *sinkMessageIteratorImpl) sendBatchEnd() { } func (i *sinkMessageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { + sink.RecordBatchSize(float64(len(batchData))) + sink.IncreaseTotalBatches() // Create new message reader reader := NewMessageReader(batchData) diff --git a/backend/pkg/messages/iterator.go b/backend/pkg/messages/iterator.go index a6717257e..f7b014d30 100644 --- a/backend/pkg/messages/iterator.go +++ b/backend/pkg/messages/iterator.go @@ -74,12 +74,13 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { i.messageInfo.Index++ msg := reader.Message() + msgType := msg.TypeID() // Preprocess "system" messages if _, ok := i.preFilter[msg.TypeID()]; ok { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } msg = transformDeprecated(msg) @@ -99,7 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { if i.autoDecode { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } } diff --git a/backend/pkg/messages/session-iterator.go b/backend/pkg/messages/session-iterator.go index eb9f32387..45daae4b8 100644 --- a/backend/pkg/messages/session-iterator.go +++ b/backend/pkg/messages/session-iterator.go @@ -40,13 +40,6 @@ func SplitMessages(data []byte) ([]*msgInfo, error) { 
return nil, fmt.Errorf("read message type err: %s", err) } - if msgType == MsgRedux { - log.Printf("redux") - } - if msgType == MsgFetch { - log.Printf("fetch") - } - // Read message body _, err = ReadMessage(msgType, reader) if err != nil { diff --git a/backend/pkg/metrics/assets/metrics.go b/backend/pkg/metrics/assets/metrics.go new file mode 100644 index 000000000..44af0dfa9 --- /dev/null +++ b/backend/pkg/metrics/assets/metrics.go @@ -0,0 +1,72 @@ +package assets + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" + "strconv" +) + +var assetsProcessedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "assets", + Name: "processed_total", + Help: "A counter displaying the total count of processed assets.", + }, +) + +func IncreaseProcessesSessions() { + assetsProcessedSessions.Inc() +} + +var assetsSavedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "assets", + Name: "saved_total", + Help: "A counter displaying the total number of cached assets.", + }, +) + +func IncreaseSavedSessions() { + assetsSavedSessions.Inc() +} + +var assetsDownloadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "assets", + Name: "download_duration_seconds", + Help: "A histogram displaying the duration of downloading for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"response_code"}, +) + +func RecordDownloadDuration(durMillis float64, code int) { + assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0) +} + +var assetsUploadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "assets", + Name: "upload_s3_duration_seconds", + Help: "A histogram displaying the duration of uploading to s3 for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"failed"}, +) + +func RecordUploadDuration(durMillis float64, isFailed bool) { + failed := 
"false" + if isFailed { + failed = "true" + } + assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + assetsProcessedSessions, + assetsSavedSessions, + assetsDownloadDuration, + assetsUploadDuration, + } +} diff --git a/backend/pkg/metrics/common/metrics.go b/backend/pkg/metrics/common/metrics.go new file mode 100644 index 000000000..85b66c713 --- /dev/null +++ b/backend/pkg/metrics/common/metrics.go @@ -0,0 +1,11 @@ +package common + +// DefaultDurationBuckets is a set of buckets from 5 milliseconds to 1000 seconds (16.6667 minutes) +var DefaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000} + +// DefaultSizeBuckets is a set of buckets from 1 byte to 1_000_000_000 bytes (~1 Gb) +var DefaultSizeBuckets = []float64{1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100_000, 250_000, + 500_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000} + +// DefaultBuckets is a set of buckets from 1 to 1_000_000 elements +var DefaultBuckets = []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10_000, 50_000, 100_000, 1_000_000} diff --git a/backend/pkg/metrics/database/metrics.go b/backend/pkg/metrics/database/metrics.go new file mode 100644 index 000000000..a9f3990cd --- /dev/null +++ b/backend/pkg/metrics/database/metrics.go @@ -0,0 +1,127 @@ +package database + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var dbBatchSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_size_bytes", + Help: "A histogram displaying the batch size in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordBatchSize(size float64) { + dbBatchSize.Observe(size) +} + +var dbBatchElements = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_size_elements", + Help: "A 
histogram displaying the number of SQL commands in each batch.", + Buckets: common.DefaultBuckets, + }, +) + +func RecordBatchElements(number float64) { + dbBatchElements.Observe(number) +} + +var dbBatchInsertDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_insert_duration_seconds", + Help: "A histogram displaying the duration of batch inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordBatchInsertDuration(durMillis float64) { + dbBatchInsertDuration.Observe(durMillis / 1000.0) +} + +var dbBulkSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_bytes", + Help: "A histogram displaying the bulk size in bytes.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkSize(size float64, db, table string) { + dbBulkSize.WithLabelValues(db, table).Observe(size) +} + +var dbBulkElements = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_elements", + Help: "A histogram displaying the size of data set in each bulk.", + Buckets: common.DefaultBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkElements(size float64, db, table string) { + dbBulkElements.WithLabelValues(db, table).Observe(size) +} + +var dbBulkInsertDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_insert_duration_seconds", + Help: "A histogram displaying the duration of bulk inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkInsertDuration(durMillis float64, db, table string) { + dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0) +} + +var dbRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "request_duration_seconds", + Help: "A histogram displaying the duration of each sql request in seconds.", + 
Buckets: common.DefaultDurationBuckets, + }, + []string{"method", "table"}, +) + +func RecordRequestDuration(durMillis float64, method, table string) { + dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0) +} + +var dbTotalRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "db", + Name: "requests_total", + Help: "A counter showing the total number of all SQL requests.", + }, + []string{"method", "table"}, +) + +func IncreaseTotalRequests(method, table string) { + dbTotalRequests.WithLabelValues(method, table).Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + dbBatchSize, + dbBatchElements, + dbBatchInsertDuration, + dbBulkSize, + dbBulkElements, + dbBulkInsertDuration, + dbRequestDuration, + dbTotalRequests, + } +} diff --git a/backend/pkg/metrics/ender/metrics.go b/backend/pkg/metrics/ender/metrics.go new file mode 100644 index 000000000..5e3308554 --- /dev/null +++ b/backend/pkg/metrics/ender/metrics.go @@ -0,0 +1,51 @@ +package ender + +import "github.com/prometheus/client_golang/prometheus" + +var enderActiveSessions = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "ender", + Name: "sessions_active", + Help: "A gauge displaying the number of active (live) sessions.", + }, +) + +func IncreaseActiveSessions() { + enderActiveSessions.Inc() +} + +func DecreaseActiveSessions() { + enderActiveSessions.Dec() +} + +var enderClosedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "ender", + Name: "sessions_closed", + Help: "A counter displaying the number of closed sessions (sent SessionEnd).", + }, +) + +func IncreaseClosedSessions() { + enderClosedSessions.Inc() +} + +var enderTotalSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "ender", + Name: "sessions_total", + Help: "A counter displaying the number of all processed sessions.", + }, +) + +func IncreaseTotalSessions() { + enderTotalSessions.Inc() +} + +func List() 
[]prometheus.Collector { + return []prometheus.Collector{ + enderActiveSessions, + enderClosedSessions, + enderTotalSessions, + } +} diff --git a/backend/pkg/metrics/http/metrics.go b/backend/pkg/metrics/http/metrics.go new file mode 100644 index 000000000..7a835d7f6 --- /dev/null +++ b/backend/pkg/metrics/http/metrics.go @@ -0,0 +1,55 @@ +package http + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" + "strconv" +) + +var httpRequestSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "http", + Name: "request_size_bytes", + Help: "A histogram displaying the size of each HTTP request in bytes.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"url", "response_code"}, +) + +func RecordRequestSize(size float64, url string, code int) { + httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size) +} + +var httpRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "http", + Name: "request_duration_seconds", + Help: "A histogram displaying the duration of each HTTP request in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"url", "response_code"}, +) + +func RecordRequestDuration(durMillis float64, url string, code int) { + httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0) +} + +var httpTotalRequests = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "http", + Name: "requests_total", + Help: "A counter displaying the number all HTTP requests.", + }, +) + +func IncreaseTotalRequests() { + httpTotalRequests.Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + httpRequestSize, + httpRequestDuration, + httpTotalRequests, + } +} diff --git a/backend/pkg/metrics/server.go b/backend/pkg/metrics/server.go new file mode 100644 index 000000000..fb3be5afc --- /dev/null +++ b/backend/pkg/metrics/server.go @@ -0,0 +1,40 @@ +package metrics + +import ( + 
"github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "log" + "net/http" +) + +type MetricServer struct { + registry *prometheus.Registry +} + +func New() *MetricServer { + registry := prometheus.NewRegistry() + // Add go runtime metrics and process collectors. + registry.MustRegister( + collectors.NewGoCollector(), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + ) + // Expose /metrics HTTP endpoint using the created custom registry. + http.Handle( + "/metrics", promhttp.HandlerFor( + registry, + promhttp.HandlerOpts{ + EnableOpenMetrics: true, + }), + ) + go func() { + log.Println(http.ListenAndServe(":8888", nil)) + }() + return &MetricServer{ + registry: registry, + } +} + +func (s *MetricServer) Register(cs []prometheus.Collector) { + s.registry.MustRegister(cs...) +} diff --git a/backend/pkg/metrics/sink/metrics.go b/backend/pkg/metrics/sink/metrics.go new file mode 100644 index 000000000..52cb73ba1 --- /dev/null +++ b/backend/pkg/metrics/sink/metrics.go @@ -0,0 +1,185 @@ +package sink + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var sinkMessageSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "message_size_bytes", + Help: "A histogram displaying the size of each message in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordMessageSize(size float64) { + sinkMessageSize.Observe(size) +} + +var sinkWrittenMessages = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "messages_written", + Help: "A counter displaying the total number of all written messages.", + }, +) + +func IncreaseWrittenMessages() { + sinkWrittenMessages.Inc() +} + +var sinkTotalMessages = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "messages_total", + Help: "A counter 
displaying the total number of all processed messages.", + }, +) + +func IncreaseTotalMessages() { + sinkTotalMessages.Inc() +} + +var sinkBatchSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "batch_size_bytes", + Help: "A histogram displaying the size of each batch in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordBatchSize(size float64) { + sinkBatchSize.Observe(size) +} + +var sinkTotalBatches = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "batches_total", + Help: "A counter displaying the total number of all written batches.", + }, +) + +func IncreaseTotalBatches() { + sinkTotalBatches.Inc() +} + +var sinkWrittenBytes = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "written_bytes", + Help: "A histogram displaying the size of buffer in bytes written to session file.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"file_type"}, +) + +func RecordWrittenBytes(size float64, fileType string) { + if size == 0 { + return + } + sinkWrittenBytes.WithLabelValues(fileType).Observe(size) + IncreaseTotalWrittenBytes(size, fileType) +} + +var sinkTotalWrittenBytes = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "written_bytes_total", + Help: "A counter displaying the total number of bytes written to all session files.", + }, + []string{"file_type"}, +) + +func IncreaseTotalWrittenBytes(size float64, fileType string) { + if size == 0 { + return + } + sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size) +} + +var sinkCachedAssets = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "sink", + Name: "assets_cached", + Help: "A gauge displaying the current number of cached assets.", + }, +) + +func IncreaseCachedAssets() { + sinkCachedAssets.Inc() +} + +func DecreaseCachedAssets() { + sinkCachedAssets.Dec() +} + +var sinkSkippedAssets = prometheus.NewCounter( + prometheus.CounterOpts{ + 
Namespace: "sink", + Name: "assets_skipped", + Help: "A counter displaying the total number of all skipped assets.", + }, +) + +func IncreaseSkippedAssets() { + sinkSkippedAssets.Inc() +} + +var sinkTotalAssets = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "assets_total", + Help: "A counter displaying the total number of all processed assets.", + }, +) + +func IncreaseTotalAssets() { + sinkTotalAssets.Inc() +} + +var sinkAssetSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_size_bytes", + Help: "A histogram displaying the size of each asset in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordAssetSize(size float64) { + sinkAssetSize.Observe(size) +} + +var sinkProcessAssetDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_process_duration_seconds", + Help: "A histogram displaying the duration of processing for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordProcessAssetDuration(durMillis float64) { + sinkProcessAssetDuration.Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + sinkMessageSize, + sinkWrittenMessages, + sinkTotalMessages, + sinkBatchSize, + sinkTotalBatches, + sinkWrittenBytes, + sinkTotalWrittenBytes, + sinkCachedAssets, + sinkSkippedAssets, + sinkTotalAssets, + sinkAssetSize, + sinkProcessAssetDuration, + } +} diff --git a/backend/pkg/metrics/storage/metrics.go b/backend/pkg/metrics/storage/metrics.go new file mode 100644 index 000000000..26459c90d --- /dev/null +++ b/backend/pkg/metrics/storage/metrics.go @@ -0,0 +1,114 @@ +package storage + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var storageSessionSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "session_size_bytes", + Help: "A histogram 
displaying the size of each session file in bytes prior to any manipulation.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSize(fileSize float64, fileType string) { + storageSessionSize.WithLabelValues(fileType).Observe(fileSize) +} + +var storageTotalSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "storage", + Name: "sessions_total", + Help: "A counter displaying the total number of all processed sessions.", + }, +) + +func IncreaseStorageTotalSessions() { + storageTotalSessions.Inc() +} + +var storageSessionReadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "read_duration_seconds", + Help: "A histogram displaying the duration of reading for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionReadDuration(durMillis float64, fileType string) { + storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionSortDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "sort_duration_seconds", + Help: "A histogram displaying the duration of sorting for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSortDuration(durMillis float64, fileType string) { + storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionEncodeDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "encode_duration_seconds", + Help: "A histogram displaying the duration of encoding for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionEncodeDuration(durMillis float64, fileType string) { + storageSessionEncodeDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var 
storageSessionCompressDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "compress_duration_seconds", + Help: "A histogram displaying the duration of compressing for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionCompressDuration(durMillis float64, fileType string) { + storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionUploadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "upload_duration_seconds", + Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionUploadDuration(durMillis float64, fileType string) { + storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + storageSessionSize, + storageTotalSessions, + storageSessionReadDuration, + storageSessionSortDuration, + storageSessionEncodeDuration, + storageSessionCompressDuration, + storageSessionUploadDuration, + } +} diff --git a/backend/pkg/monitoring/metrics.go b/backend/pkg/monitoring/metrics.go deleted file mode 100644 index 803fba127..000000000 --- a/backend/pkg/monitoring/metrics.go +++ /dev/null @@ -1,138 +0,0 @@ -package monitoring - -import ( - "fmt" - "log" - "net/http" - - "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector 
"go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -// Metrics stores all collected metrics -type Metrics struct { - meter metric.Meter - counters map[string]syncfloat64.Counter - upDownCounters map[string]syncfloat64.UpDownCounter - histograms map[string]syncfloat64.Histogram -} - -func New(name string) *Metrics { - m := &Metrics{ - counters: make(map[string]syncfloat64.Counter), - upDownCounters: make(map[string]syncfloat64.UpDownCounter), - histograms: make(map[string]syncfloat64.Histogram), - } - m.initPrometheusDataExporter() - m.initMetrics(name) - return m -} - -// initPrometheusDataExporter allows to use collected metrics in prometheus -func (m *Metrics) initPrometheusDataExporter() { - config := prometheus.Config{ - DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000}, - } - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution( - histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), - ), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - ) - exporter, err := prometheus.New(config, c) - if err != nil { - log.Panicf("failed to initialize prometheus exporter %v", err) - } - - global.SetMeterProvider(exporter.MeterProvider()) - - http.HandleFunc("/metrics", exporter.ServeHTTP) - go func() { - _ = http.ListenAndServe(":8888", nil) - }() - - fmt.Println("Prometheus server running on :8888") -} - -func (m *Metrics) initMetrics(name string) { - m.meter = global.Meter(name) -} - -/* -Counter is a synchronous instrument that measures additive non-decreasing values, for example, the number of: -- processed requests -- received bytes -- disk reads -*/ - -func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) { - if counter, ok := m.counters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().Counter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize counter: %v", err) - } - 
m.counters[name] = counter - return counter, nil -} - -func (m *Metrics) GetCounter(name string) syncfloat64.Counter { - return m.counters[name] -} - -/* -UpDownCounter is a synchronous instrument which measures additive values that increase or decrease with time, -for example, the number of: -- active requests -- open connections -- memory in use (megabytes) -*/ - -func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) { - if counter, ok := m.upDownCounters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().UpDownCounter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize upDownCounter: %v", err) - } - m.upDownCounters[name] = counter - return counter, nil -} - -func (m *Metrics) GetUpDownCounter(name string) syncfloat64.UpDownCounter { - return m.upDownCounters[name] -} - -/* -Histogram is a synchronous instrument that produces a histogram from recorded values, for example: -- request latency -- request size -*/ - -func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) { - if hist, ok := m.histograms[name]; ok { - return hist, nil - } - hist, err := m.meter.SyncFloat64().Histogram(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize histogram: %v", err) - } - m.histograms[name] = hist - return hist, nil -} - -func (m *Metrics) GetHistogram(name string) syncfloat64.Histogram { - return m.histograms[name] -} diff --git a/ee/api/.gitignore b/ee/api/.gitignore index 5e982fda6..79aec2ade 100644 --- a/ee/api/.gitignore +++ b/ee/api/.gitignore @@ -263,5 +263,6 @@ Pipfile.lock /chalicelib/core/saved_search.py /app_alerts.py /build_alerts.sh +/build_crons.sh /routers/subs/v1_api.py #exp /chalicelib/core/dashboards.py diff --git a/ee/api/chalicelib/core/__init__.py b/ee/api/chalicelib/core/__init__.py index 64529b782..62723d0f1 100644 --- a/ee/api/chalicelib/core/__init__.py +++ b/ee/api/chalicelib/core/__init__.py @@ -6,41 +6,41 @@ 
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) from . import sessions as sessions_legacy if config("EXP_SESSIONS_SEARCH", cast=bool, default=False): - print(">>> Using experimental sessions search") + logging.info(">>> Using experimental sessions search") from . import sessions_exp as sessions else: from . import sessions as sessions if config("EXP_AUTOCOMPLETE", cast=bool, default=False): - print(">>> Using experimental autocomplete") + logging.info(">>> Using experimental autocomplete") from . import autocomplete_exp as autocomplete else: from . import autocomplete as autocomplete if config("EXP_ERRORS_SEARCH", cast=bool, default=False): - print(">>> Using experimental error search") + logging.info(">>> Using experimental error search") from . import errors as errors_legacy from . import errors_exp as errors if config("EXP_ERRORS_GET", cast=bool, default=False): - print(">>> Using experimental error get") + logging.info(">>> Using experimental error get") else: from . import errors as errors if config("EXP_METRICS", cast=bool, default=False): - print(">>> Using experimental metrics") + logging.info(">>> Using experimental metrics") from . import metrics_exp as metrics else: from . import metrics as metrics if config("EXP_ALERTS", cast=bool, default=False): - print(">>> Using experimental alerts") + logging.info(">>> Using experimental alerts") from . import alerts_processor_exp as alerts_processor else: from . import alerts_processor as alerts_processor if config("EXP_FUNNELS", cast=bool, default=False): - print(">>> Using experimental funnels") + logging.info(">>> Using experimental funnels") if not config("EXP_SESSIONS_SEARCH", cast=bool, default=False): from . import sessions as sessions_legacy @@ -49,4 +49,4 @@ else: from . 
import significance as significance if config("EXP_RESOURCES", cast=bool, default=False): - print(">>> Using experimental resources for session-replay") + logging.info(">>> Using experimental resources for session-replay") diff --git a/ee/api/chalicelib/core/alerts_processor.py b/ee/api/chalicelib/core/alerts_processor.py index 69a0f7f5f..17e4d275f 100644 --- a/ee/api/chalicelib/core/alerts_processor.py +++ b/ee/api/chalicelib/core/alerts_processor.py @@ -54,10 +54,12 @@ LeftToDb = { schemas.AlertColumn.errors__4xx_5xx__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", "condition": "status/100!=2"}, - schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=4"}, - schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__4xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=5"}, schemas.AlertColumn.errors__javascript__impacted_sessions__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, @@ -100,7 +102,7 @@ def can_check(a) -> bool: a["options"].get("lastNotification") is None or a["options"]["lastNotification"] <= 0 or ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ - and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + and ((now - a["createdAt"]) % 
(TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 def Build(a): @@ -124,7 +126,7 @@ def Build(a): subQ = f"""SELECT {colDef["formula"]} AS value FROM {colDef["table"]} WHERE project_id = %(project_id)s - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" j_s = colDef.get("joinSessions", True) main_table = colDef["table"] is_ss = main_table == "public.sessions" @@ -147,8 +149,7 @@ def Build(a): "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} else: - sub1 = f"""{subQ} AND timestamp>=%(startDate)s - AND timestamp<=%(now)s + sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""} {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}""" params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""} @@ -211,7 +212,7 @@ def process(): cur = cur.recreate(rollback=True) if len(notifications) > 0: cur.execute( - cur.mogrify(f"""UPDATE public.Alerts + cur.mogrify(f"""UPDATE public.alerts SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) if len(notifications) > 0: diff --git a/ee/api/chalicelib/core/alerts_processor_exp.py b/ee/api/chalicelib/core/alerts_processor_exp.py index 7a300654c..310c6faa9 100644 --- a/ee/api/chalicelib/core/alerts_processor_exp.py +++ b/ee/api/chalicelib/core/alerts_processor_exp.py @@ -4,9 +4,10 @@ from decouple import config import schemas from chalicelib.core import alerts_listener, alerts_processor -from chalicelib.core import sessions, alerts +from chalicelib.core import alerts from chalicelib.utils import pg_client, ch_client, exp_ch_helper from 
chalicelib.utils.TimeUTC import TimeUTC +from chalicelib.core import sessions_exp as sessions logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) @@ -135,7 +136,7 @@ def Build(a): FROM {colDef["table"](now)} WHERE project_id = %(project_id)s {"AND event_type=%(event_type)s" if params["event_type"] else ""} - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid""" @@ -198,9 +199,14 @@ def process(): if alert["query"]["left"] != "CUSTOM": continue if alerts_processor.can_check(alert): - logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}") query, params = Build(alert) - query = ch_cur.format(query, params) + try: + query = ch_cur.format(query, params) + except Exception as e: + logging.error( + f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}") + logging.error(e) + continue logging.debug(alert) logging.debug(query) try: diff --git a/ee/api/chalicelib/core/projects.py b/ee/api/chalicelib/core/projects.py index 7b54c9beb..2e22422ad 100644 --- a/ee/api/chalicelib/core/projects.py +++ b/ee/api/chalicelib/core/projects.py @@ -13,11 +13,11 @@ from chalicelib.utils.TimeUTC import TimeUTC def __exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int]) -> bool: with pg_client.PostgresClient() as cur: query = cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.projects - WHERE deleted_at IS NULL - AND name ILIKE %(name)s - AND tenant_id = %(tenant_id)s - {"AND project_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""", + FROM public.projects + WHERE deleted_at IS NULL + AND name ILIKE %(name)s + AND tenant_id = %(tenant_id)s + {"AND project_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id}) 
cur.execute(query=query) diff --git a/ee/api/chalicelib/core/roles.py b/ee/api/chalicelib/core/roles.py index 9b8a9b56e..79f1caec7 100644 --- a/ee/api/chalicelib/core/roles.py +++ b/ee/api/chalicelib/core/roles.py @@ -12,11 +12,11 @@ from chalicelib.utils.TimeUTC import TimeUTC def __exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int]) -> bool: with pg_client.PostgresClient() as cur: query = cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.roles - WHERE tenant_id = %(tenant_id)s - AND name ILIKE %(name)s - AND deleted_at ISNULL - {"role_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", + FROM public.roles + WHERE tenant_id = %(tenant_id)s + AND name ILIKE %(name)s + AND deleted_at ISNULL + {"AND role_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id}) cur.execute(query=query) row = cur.fetchone() diff --git a/ee/api/chalicelib/core/sessions.py b/ee/api/chalicelib/core/sessions.py index bc7613278..6d92c3954 100644 --- a/ee/api/chalicelib/core/sessions.py +++ b/ee/api/chalicelib/core/sessions.py @@ -304,7 +304,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d extra_col = "" extra_where = "" pre_query = "" - distinct_on="s.session_id" + distinct_on = "s.session_id" if metric_of == schemas.MetricOfTable.user_country: main_col = "user_country" elif metric_of == schemas.MetricOfTable.user_device: @@ -324,7 +324,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d elif metric_of == schemas.MetricOfTable.visited_url: main_col = "path" extra_col = ", path" - distinct_on+=",path" + distinct_on += ",path" main_query = cur.mogrify(f"""{pre_query} SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values FROM (SELECT {main_col} AS name, @@ -1197,8 +1197,9 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): with 
pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") - return row.get("count", 0) + cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + row = cur.fetchone() + return row.get("count", 0) if row else 0 def session_exists(project_id, session_id): @@ -1206,7 +1207,8 @@ def session_exists(project_id, session_id): query = cur.mogrify("""SELECT 1 FROM public.sessions WHERE session_id=%(session_id)s - AND project_id=%(project_id)s""", + AND project_id=%(project_id)s + LIMIT 1;""", {"project_id": project_id, "session_id": session_id}) cur.execute(query) row = cur.fetchone() diff --git a/ee/api/chalicelib/core/sessions_exp.py b/ee/api/chalicelib/core/sessions_exp.py index a4713e992..f60090ed4 100644 --- a/ee/api/chalicelib/core/sessions_exp.py +++ b/ee/api/chalicelib/core/sessions_exp.py @@ -202,7 +202,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator): # This function executes the query and return result def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False, - error_status=schemas.ErrorStatus.all, count_only=False, issue=None): + error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False): full_args, query_part = search_query_parts_ch(data=data, error_status=error_status, errors_only=errors_only, favorite_only=data.bookmarked, issue=issue, project_id=project_id, user_id=user_id) @@ -264,6 +264,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_ GROUP BY user_id ) AS users_sessions;""", full_args) + elif ids_only: + main_query = cur.format(f"""SELECT DISTINCT ON(s.session_id) s.session_id + {query_part} + ORDER BY s.session_id desc + LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""", + full_args) else: if data.order is None: data.order = schemas.SortOrderType.desc.value @@ -302,8 +308,8 @@ def search_sessions(data: 
schemas.SessionsSearchPayloadSchema, project_id, user_ print(data.json()) print("--------------------") raise err - if errors_only: - return helper.list_to_camel_case(cur.fetchall()) + if errors_only or ids_only: + return helper.list_to_camel_case(sessions) if len(sessions) > 0: sessions = sessions[0] @@ -1170,6 +1176,9 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\ """) event_index += 1 + # limit THEN-events to 7 in CH because sequenceMatch cannot take more arguments + if event_index == 7 and data.events_order == schemas.SearchEventOrder._then: + break if event_index < 2: data.events_order = schemas.SearchEventOrder._or @@ -1520,17 +1529,18 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): - with pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + with ch_client.ClickHouseClient() as cur: + row = cur.execute(query=f"SELECT COUNT(session_id) AS count FROM {exp_ch_helper.get_main_sessions_table()}") return row.get("count", 0) def session_exists(project_id, session_id): with ch_client.ClickHouseClient() as cur: - query = cur.format("""SELECT 1 - FROM public.sessions - WHERE session_id=%(session_id)s - AND project_id=%(project_id)s""", + query = cur.format(f"""SELECT 1 + FROM {exp_ch_helper.get_main_sessions_table()} + WHERE session_id=%(session_id)s + AND project_id=%(project_id)s + LIMIT 1""", {"project_id": project_id, "session_id": session_id}) row = cur.execute(query) return row is not None diff --git a/ee/api/chalicelib/core/sessions_insights.py b/ee/api/chalicelib/core/sessions_insights.py index 9f05e9b11..532e47e19 100644 --- a/ee/api/chalicelib/core/sessions_insights.py +++ b/ee/api/chalicelib/core/sessions_insights.py @@ -173,7 +173,7 @@ def query_requests_by_period(project_id, start_time, end_time, filters: 
Optional if n == n_: data_['value'] = v[0] data_['oldValue'] = v[1] - data_['change'] = 100* v[2] + data_['change'] = 100 * v[2] data_['isNew'] = False break results.append(data_) @@ -252,12 +252,12 @@ def query_most_errors_by_period(project_id, start_time, end_time, for n in common_errors: if n is None: continue - old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), names_idx) - if old_errors == 0: + sum_old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), sessions_idx) + if sum_old_errors == 0: continue - new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), names_idx) + sum_new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx) # error_increase[n] = (new_errors - old_errors) / old_errors - error_values[n] = new_errors, old_errors, (new_errors - old_errors) / old_errors + error_values[n] = sum_new_errors, sum_old_errors, (sum_new_errors - sum_old_errors) / sum_old_errors ratio = sorted(percentage_errors.items(), key=lambda k: k[1], reverse=True) increase = sorted(error_values.items(), key=lambda k: k[1][-1], reverse=True) names_ = set([k[0] for k in increase[:3] + ratio[:3]] + new_errors[:3]) @@ -347,18 +347,20 @@ def query_cpu_memory_by_period(project_id, start_time, end_time, output = list() if cpu_oldvalue is not None or cpu_newvalue is not None: output.append({'category': schemas_ee.InsightCategories.resources, - 'name': 'cpu', - 'value': cpu_newvalue, - 'oldValue': cpu_oldvalue, - 'change': 100 * (cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio, - 'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False}) + 'name': 'cpu', + 'value': cpu_newvalue, + 'oldValue': cpu_oldvalue, + 'change': 100 * ( + cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio, + 'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False}) if mem_oldvalue is not None or mem_newvalue is not None: 
output.append({'category': schemas_ee.InsightCategories.resources, - 'name': 'memory', - 'value': mem_newvalue, - 'oldValue': mem_oldvalue, - 'change': 100 * (mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio, - 'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False}) + 'name': 'memory', + 'value': mem_newvalue, + 'oldValue': mem_oldvalue, + 'change': 100 * ( + mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio, + 'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False}) return output diff --git a/ee/api/chalicelib/core/users.py b/ee/api/chalicelib/core/users.py index d2b13535a..ff357113f 100644 --- a/ee/api/chalicelib/core/users.py +++ b/ee/api/chalicelib/core/users.py @@ -591,19 +591,6 @@ def set_password_invitation(tenant_id, user_id, new_password): } -def count_members(tenant_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - """SELECT - COUNT(user_id) - FROM public.users WHERE tenant_id = %(tenant_id)s AND deleted_at IS NULL;""", - {"tenant_id": tenant_id}) - ) - r = cur.fetchone() - return r["count"] - - def email_exists(email): with pg_client.PostgresClient() as cur: cur.execute( diff --git a/ee/api/chalicelib/core/webhook.py b/ee/api/chalicelib/core/webhook.py index 584db9839..d1e70d3e7 100644 --- a/ee/api/chalicelib/core/webhook.py +++ b/ee/api/chalicelib/core/webhook.py @@ -116,12 +116,12 @@ def exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int], webhook_type: str = schemas.WebhookType.webhook) -> bool: with pg_client.PostgresClient() as cur: query = cur.mogrify(f"""SELECT EXISTS(SELECT 1 - FROM public.webhooks - WHERE name ILIKE %(name)s - AND deleted_at ISNULL - AND tenant_id=%(tenant_id)s - AND type=%(webhook_type)s - {"AND webhook_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""", + FROM public.webhooks + WHERE name ILIKE %(name)s + AND deleted_at ISNULL + AND tenant_id=%(tenant_id)s + 
AND type=%(webhook_type)s + {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""", {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id, "webhook_type": webhook_type}) cur.execute(query) diff --git a/ee/api/clean.sh b/ee/api/clean-dev.sh similarity index 100% rename from ee/api/clean.sh rename to ee/api/clean-dev.sh diff --git a/ee/backend/internal/db/datasaver/saver.go b/ee/backend/internal/db/datasaver/saver.go index 76057309d..e05e502f1 100644 --- a/ee/backend/internal/db/datasaver/saver.go +++ b/ee/backend/internal/db/datasaver/saver.go @@ -19,7 +19,6 @@ func New(pg *cache.PGCache, cfg *db.Config) *Saver { var producer types.Producer = nil if cfg.UseQuickwit { producer = queue.NewProducer(cfg.MessageSizeLimit, true) - defer producer.Close(15000) } return &Saver{pg: pg, producer: producer, topic: cfg.QuickwitTopic} } diff --git a/ee/backend/pkg/db/clickhouse/bulk.go b/ee/backend/pkg/db/clickhouse/bulk.go index 706b66f68..6eb8d98fd 100644 --- a/ee/backend/pkg/db/clickhouse/bulk.go +++ b/ee/backend/pkg/db/clickhouse/bulk.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "log" + "openreplay/backend/pkg/metrics/database" + "time" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) @@ -16,19 +18,23 @@ type Bulk interface { type bulkImpl struct { conn driver.Conn + table string query string values [][]interface{} } -func NewBulk(conn driver.Conn, query string) (Bulk, error) { +func NewBulk(conn driver.Conn, table, query string) (Bulk, error) { switch { case conn == nil: return nil, errors.New("clickhouse connection is empty") + case table == "": + return nil, errors.New("table is empty") case query == "": return nil, errors.New("query is empty") } return &bulkImpl{ conn: conn, + table: table, query: query, values: make([][]interface{}, 0), }, nil @@ -40,6 +46,7 @@ func (b *bulkImpl) Append(args ...interface{}) error { } func (b *bulkImpl) Send() error { + start := time.Now() batch, err := b.conn.PrepareBatch(context.Background(), b.query) if 
err != nil { return fmt.Errorf("can't create new batch: %s", err) @@ -50,6 +57,11 @@ func (b *bulkImpl) Send() error { log.Printf("failed query: %s", b.query) } } + err = batch.Send() + // Save bulk metrics + database.RecordBulkElements(float64(len(b.values)), "ch", b.table) + database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table) + // Prepare values slice for a new data b.values = make([][]interface{}, 0) - return batch.Send() + return err } diff --git a/ee/backend/pkg/db/clickhouse/connector.go b/ee/backend/pkg/db/clickhouse/connector.go index 157d384b9..b872adcc2 100644 --- a/ee/backend/pkg/db/clickhouse/connector.go +++ b/ee/backend/pkg/db/clickhouse/connector.go @@ -3,18 +3,16 @@ package clickhouse import ( "errors" "fmt" + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "log" "openreplay/backend/pkg/db/types" "openreplay/backend/pkg/hashid" "openreplay/backend/pkg/messages" "openreplay/backend/pkg/url" - "os" "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2" - "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - "openreplay/backend/pkg/license" ) @@ -52,28 +50,14 @@ type connectorImpl struct { finished chan struct{} } -// Check env variables. If not present, return default value. 
-func getEnv(key, fallback string) string { - if value, ok := os.LookupEnv(key); ok { - return value - } - return fallback -} - func NewConnector(url string) Connector { license.CheckLicense() - // Check username, password, database - userName := getEnv("CH_USERNAME", "default") - password := getEnv("CH_PASSWORD", "") - database := getEnv("CH_DATABASE", "default") url = strings.TrimPrefix(url, "tcp://") - url = strings.TrimSuffix(url, "/"+database) + url = strings.TrimSuffix(url, "/default") conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{url}, Auth: clickhouse.Auth{ - Database: database, - Username: userName, - Password: password, + Database: "default", }, MaxOpenConns: 20, MaxIdleConns: 15, @@ -99,7 +83,7 @@ func NewConnector(url string) Connector { } func (c *connectorImpl) newBatch(name, query string) error { - batch, err := NewBulk(c.conn, query) + batch, err := NewBulk(c.conn, name, query) if err != nil { return fmt.Errorf("can't create new batch: %s", err) } diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 30961fc88..6461a1214 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -5,6 +5,148 @@ $$ SELECT 'v1.10.0-ee' $$ LANGUAGE sql IMMUTABLE; +-- Backup dashboard & search data: +DO +$$ + BEGIN + IF NOT (SELECT EXISTS(SELECT schema_name + FROM information_schema.schemata + WHERE schema_name = 'backup_v1_10_0')) THEN + CREATE SCHEMA backup_v1_10_0; + CREATE TABLE backup_v1_10_0.dashboards + ( + dashboard_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + description text NOT NULL DEFAULT '', + is_public boolean NOT NULL DEFAULT TRUE, + is_pinned boolean NOT NULL DEFAULT FALSE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp NULL DEFAULT NULL + ); + CREATE TABLE backup_v1_10_0.dashboard_widgets + ( + 
widget_id integer, + dashboard_id integer, + metric_id integer, + user_id integer, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + config jsonb NOT NULL DEFAULT '{}'::jsonb + ); + CREATE TABLE backup_v1_10_0.searches + ( + search_id integer, + project_id integer, + user_id integer, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + CREATE TABLE backup_v1_10_0.metrics + ( + metric_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + active boolean NOT NULL DEFAULT TRUE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp, + edited_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + metric_type text NOT NULL, + view_type text NOT NULL, + metric_of text NOT NULL DEFAULT 'sessionCount', + metric_value text[] NOT NULL DEFAULT '{}'::text[], + metric_format text, + category text NULL DEFAULT 'custom', + is_pinned boolean NOT NULL DEFAULT FALSE, + is_predefined boolean NOT NULL DEFAULT FALSE, + is_template boolean NOT NULL DEFAULT FALSE, + predefined_key text NULL DEFAULT NULL, + default_config jsonb NOT NULL + ); + CREATE TABLE backup_v1_10_0.metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + + INSERT INTO backup_v1_10_0.dashboards(dashboard_id, project_id, user_id, name, description, is_public, + is_pinned, + created_at, deleted_at) + SELECT dashboard_id, + project_id, + user_id, + name, + description, + is_public, + is_pinned, + created_at, + deleted_at + FROM public.dashboards + ORDER BY dashboard_id; + + INSERT INTO 
backup_v1_10_0.metrics(metric_id, project_id, user_id, name, is_public, active, created_at, + deleted_at, edited_at, metric_type, view_type, metric_of, metric_value, + metric_format, category, is_pinned, is_predefined, is_template, + predefined_key, default_config) + SELECT metric_id, + project_id, + user_id, + name, + is_public, + active, + created_at, + deleted_at, + edited_at, + metric_type, + view_type, + metric_of, + metric_value, + metric_format, + category, + is_pinned, + is_predefined, + is_template, + predefined_key, + default_config + FROM public.metrics + ORDER BY metric_id; + + INSERT INTO backup_v1_10_0.metric_series(series_id, metric_id, index, name, filter, created_at, deleted_at) + SELECT series_id, metric_id, index, name, filter, created_at, deleted_at + FROM public.metric_series + ORDER BY series_id; + + INSERT INTO backup_v1_10_0.dashboard_widgets(widget_id, dashboard_id, metric_id, user_id, created_at, config) + SELECT widget_id, dashboard_id, metric_id, user_id, created_at, config + FROM public.dashboard_widgets + ORDER BY widget_id; + + INSERT INTO backup_v1_10_0.searches(search_id, project_id, user_id, name, filter, created_at, deleted_at, + is_public) + SELECT search_id, + project_id, + user_id, + name, + filter, + created_at, + deleted_at, + is_public + FROM public.searches + ORDER BY search_id; + END IF; + END +$$ LANGUAGE plpgsql; + CREATE TABLE IF NOT EXISTS frontend_signals ( project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -36,6 +178,73 @@ ALTER TYPE webhook_type ADD VALUE IF NOT EXISTS 'msteams'; UPDATE metrics SET is_public= TRUE; +CREATE OR REPLACE FUNCTION get_global_key(key text) + RETURNS text AS +$$ +DECLARE + events_map CONSTANT JSONB := '{ + "SESSIONS": "sessions", + "sessionCount": "sessionCount", + "CLICK": "click", + "INPUT": "input", + "LOCATION": "location", + "CUSTOM": "custom", + "REQUEST": "request", + "FETCH": "fetch", + "GRAPHQL": "graphql", + "STATEACTION": "stateAction", + "ERROR": 
"error", + "CLICK_IOS": "clickIos", + "INPUT_IOS": "inputIos", + "VIEW_IOS": "viewIos", + "CUSTOM_IOS": "customIos", + "REQUEST_IOS": "requestIos", + "ERROR_IOS": "errorIos", + "DOM_COMPLETE": "domComplete", + "LARGEST_CONTENTFUL_PAINT_TIME": "largestContentfulPaintTime", + "TIME_BETWEEN_EVENTS": "timeBetweenEvents", + "TTFB": "ttfb", + "AVG_CPU_LOAD": "avgCpuLoad", + "AVG_MEMORY_USAGE": "avgMemoryUsage", + "FETCH_FAILED": "fetchFailed", + "FETCH_URL": "fetchUrl", + "FETCH_STATUS_CODE": "fetchStatusCode", + "FETCH_METHOD": "fetchMethod", + "FETCH_DURATION": "fetchDuration", + "FETCH_REQUEST_BODY": "fetchRequestBody", + "FETCH_RESPONSE_BODY": "fetchResponseBody", + "GRAPHQL_NAME": "graphqlName", + "GRAPHQL_METHOD": "graphqlMethod", + "GRAPHQL_REQUEST_BODY": "graphqlRequestBody", + "GRAPHQL_RESPONSE_BODY": "graphqlResponseBody", + "USEROS": "userOs", + "USERBROWSER": "userBrowser", + "USERDEVICE": "userDevice", + "USERCOUNTRY": "userCountry", + "USERID": "userId", + "USERANONYMOUSID": "userAnonymousId", + "REFERRER": "referrer", + "REVID": "revId", + "USEROS_IOS": "userOsIos", + "USERDEVICE_IOS": "userDeviceIos", + "USERCOUNTRY_IOS": "userCountryIos", + "USERID_IOS": "userIdIos", + "USERANONYMOUSID_IOS": "userAnonymousIdIos", + "REVID_IOS": "revIdIos", + "DURATION": "duration", + "PLATFORM": "platform", + "METADATA": "metadata", + "ISSUE": "issue", + "EVENTS_COUNT": "eventsCount", + "UTM_SOURCE": "utmSource", + "UTM_MEDIUM": "utmMedium", + "UTM_CAMPAIGN": "utmCampaign" + }'; +BEGIN + RETURN jsonb_extract_path(events_map, key); +END; +$$ LANGUAGE plpgsql IMMUTABLE; + ALTER TABLE IF EXISTS metrics ALTER COLUMN metric_type TYPE text, ALTER COLUMN metric_type SET DEFAULT 'timeseries', @@ -50,6 +259,11 @@ $$ FROM information_schema.columns WHERE table_name = 'metrics' and column_name = 'is_predefined') THEN + -- 0. 
change metric_of + UPDATE metrics + SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''), + left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1)) + WHERE not is_predefined; -- 1. pre transform structure ALTER TABLE IF EXISTS metrics @@ -138,9 +352,8 @@ ALTER TABLE IF EXISTS projects ADD COLUMN IF NOT EXISTS beacon_size integer NOT NULL DEFAULT 0; -- To migrate saved search data --- SET client_min_messages TO NOTICE; --- SET client_min_messages TO NOTICE; +SET client_min_messages TO NOTICE; CREATE OR REPLACE FUNCTION get_new_event_key(key text) RETURNS text AS $$ @@ -326,9 +539,109 @@ $$ $$ LANGUAGE plpgsql; + +-- To migrate metric_series data +DO +$$ + DECLARE + row RECORD; + events_att JSONB; + event_filters_att JSONB; + filters_att JSONB; + element JSONB; + s_element JSONB; + new_value TEXT; + new_events JSONB[]; + new_filters JSONB[]; + new_event_filters JSONB[]; + changed BOOLEAN; + planned_update JSONB[]; + BEGIN + planned_update := '{}'::jsonb[]; + FOR row IN SELECT * FROM metric_series + LOOP + -- Transform events attributes + events_att := row.filter -> 'events'; + IF events_att IS NOT NULL THEN + new_events := '{}'::jsonb[]; + FOR element IN SELECT jsonb_array_elements(events_att) + LOOP + changed := FALSE; + new_value := get_new_event_key(element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + END IF; + -- Transform event's sub-filters attributes + event_filters_att := element -> 'filters'; + new_event_filters := '{}'::jsonb[]; + IF event_filters_att IS NOT NULL AND jsonb_array_length(event_filters_att) > 0 THEN + FOR s_element IN SELECT jsonb_array_elements(event_filters_att) + LOOP + new_value := get_new_event_filter_key(s_element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + s_element := s_element || jsonb_build_object('type', 
new_value); + new_event_filters := array_append(new_event_filters, s_element); + END IF; + END LOOP; + element := element || jsonb_build_object('filters', new_event_filters); + END IF; + IF changed THEN + new_events := array_append(new_events, element); + END IF; + END LOOP; + IF array_length(new_events, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('events', new_events); + END IF; + END IF; + + -- Transform filters attributes + filters_att := row.filter -> 'filters'; + IF filters_att IS NOT NULL THEN + new_filters := '{}'::jsonb[]; + FOR element IN SELECT jsonb_array_elements(filters_att) + LOOP + new_value := get_new_filter_key(element ->> 'type'); + if new_value IS NOT NULL THEN + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + new_filters := array_append(new_filters, element); + END IF; + END LOOP; + IF array_length(new_filters, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('filters', new_filters); + END IF; + END IF; + + IF array_length(new_events, 1) > 0 OR array_length(new_filters, 1) > 0 THEN + planned_update := array_append(planned_update, + jsonb_build_object('id', row.series_id, 'change', row.filter)); + END IF; + END LOOP; + + -- Update metric_series + IF array_length(planned_update, 1) > 0 THEN + raise notice 'must update % elements',array_length(planned_update, 1); + + UPDATE metric_series + SET filter=changes.change -> 'change' + FROM (SELECT unnest(planned_update)) AS changes(change) + WHERE series_id = (changes.change -> 'id')::integer; + raise notice 'update done'; + ELSE + raise notice 'nothing to update'; + END IF; + END ; +$$ +LANGUAGE plpgsql; + DROP FUNCTION get_new_filter_key; DROP FUNCTION get_new_event_filter_key; DROP FUNCTION get_new_event_key; +DROP FUNCTION get_global_key; DROP TABLE IF EXISTS public.funnels; ALTER TABLE IF EXISTS public.metrics diff --git a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql 
b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql index c9cc4f87d..0b2945b39 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql @@ -747,6 +747,7 @@ $$ metric_value text[] NOT NULL DEFAULT '{}'::text[], metric_format text, thumbnail text, + is_pinned boolean NOT NULL DEFAULT FALSE, default_config jsonb NOT NULL DEFAULT '{ "col": 2, "row": 2, diff --git a/ee/sourcemap-reader/Readme.md b/ee/sourcemap-reader/Readme.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/ee/utilities/.gitignore b/ee/utilities/.gitignore index 8c9dca279..cd68b1ffb 100644 --- a/ee/utilities/.gitignore +++ b/ee/utilities/.gitignore @@ -15,5 +15,4 @@ servers/sourcemaps-server.js /utils/helper.js /utils/assistHelper.js .local -run-dev.sh *.mmdb diff --git a/ee/utilities/Dockerfile b/ee/utilities/Dockerfile index 08ccba56f..3119b5eed 100644 --- a/ee/utilities/Dockerfile +++ b/ee/utilities/Dockerfile @@ -18,4 +18,4 @@ USER 1001 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE ENTRYPOINT ["/sbin/tini", "--"] -CMD npm start +CMD npm start \ No newline at end of file diff --git a/ee/utilities/clean.sh b/ee/utilities/clean-dev.sh similarity index 100% rename from ee/utilities/clean.sh rename to ee/utilities/clean-dev.sh diff --git a/ee/utilities/package-lock.json b/ee/utilities/package-lock.json new file mode 100644 index 000000000..1d74677cf --- /dev/null +++ b/ee/utilities/package-lock.json @@ -0,0 +1,1180 @@ +{ + "name": "assist-server", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "assist-server", + "version": "1.0.0", + "license": "Elastic License 2.0 (ELv2)", + "dependencies": { + "@maxmind/geoip2-node": "^3.5.0", + "@socket.io/redis-adapter": "^8.1.0", + "express": "^4.18.2", + "jsonwebtoken": "^9.0.0", + "redis": "^4.6.4", + "socket.io": "^4.6.0", + "ua-parser-js": "^1.0.33", + 
"uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.19.0" + } + }, + "node_modules/@maxmind/geoip2-node": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/@maxmind/geoip2-node/-/geoip2-node-3.5.0.tgz", + "integrity": "sha512-WG2TNxMwDWDOrljLwyZf5bwiEYubaHuICvQRlgz74lE9OZA/z4o+ZT6OisjDBAZh/yRJVNK6mfHqmP5lLlAwsA==", + "dependencies": { + "camelcase-keys": "^7.0.0", + "ip6addr": "^0.2.5", + "maxmind": "^4.2.0" + } + }, + "node_modules/@redis/bloom": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz", + "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/client": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.5.5.tgz", + "integrity": "sha512-fuMnpDYSjT5JXR9rrCW1YWA4L8N/9/uS4ImT3ZEC/hcaQRI1D/9FvwjriRj1UvepIgzZXthFVKMNRzP/LNL7BQ==", + "dependencies": { + "cluster-key-slot": "1.1.2", + "generic-pool": "3.9.0", + "yallist": "4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@redis/graph": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.0.tgz", + "integrity": "sha512-16yZWngxyXPd+MJxeSr0dqh2AIOi8j9yXKcKCwVaKDbH3HTuETpDVPcLujhFYVPtYrngSco31BUcSa9TH31Gqg==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/json": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.4.tgz", + "integrity": "sha512-LUZE2Gdrhg0Rx7AN+cZkb1e6HjoSKaeeW8rYnt89Tly13GBI5eP4CwDVr+MY8BAYfCg4/N15OUrtLoona9uSgw==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/search": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.1.1.tgz", + "integrity": "sha512-pqCXTc5e7wJJgUuJiC3hBgfoFRoPxYzwn0BEfKgejTM7M/9zP3IpUcqcjgfp8hF+LoV8rHZzcNTz7V+pEIY7LQ==", + 
"peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/time-series": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.4.tgz", + "integrity": "sha512-ThUIgo2U/g7cCuZavucQTQzA9g9JbDDY2f64u3AbAoz/8vE2lt2U37LamDUVChhaDA3IRT9R6VvJwqnUfTJzng==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@socket.io/component-emitter": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", + "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" + }, + "node_modules/@socket.io/redis-adapter": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-8.1.0.tgz", + "integrity": "sha512-8nGMKcQ+DWpgefxA/Pi25aLajVilRPKwu29mZXu5cT+WGVYItcCkfMr4RsMmyYXUyJf00mN+7WinVLihmJwpXA==", + "dependencies": { + "debug": "~4.3.1", + "notepack.io": "~3.0.1", + "uid2": "1.0.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "socket.io-adapter": "^2.4.0" + } + }, + "node_modules/@types/cookie": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz", + "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==" + }, + "node_modules/@types/cors": { + "version": "2.8.13", + "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.13.tgz", + "integrity": "sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "18.14.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.1.tgz", + "integrity": "sha512-QH+37Qds3E0eDlReeboBxfHbX9omAcBCXEzswCu6jySP642jiM3cYSIkU/REqwhCUqXdonHFuBfJDiAJxMNhaQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + 
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/base64id": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz", + "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==", + "engines": { + "node": "^4.5.0 || >= 5.9" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/camelcase-keys": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-7.0.2.tgz", + "integrity": "sha512-Rjs1H+A9R+Ig+4E/9oyB66UC5Mj9Xq3N//vcLf2WzgdTi/3gUu3Z9KoqmlrEG4VuuLK8wJHofxzdQXz/knhiYg==", + "dependencies": { + 
"camelcase": "^6.3.0", + "map-obj": "^4.1.0", + "quick-lru": "^5.1.1", + "type-fest": "^1.2.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cluster-key-slot": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz", + "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" + }, + 
"node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/engine.io": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.1.tgz", + "integrity": "sha512-JFYQurD/nbsA5BSPmbaOSLa3tSVj8L6o4srSwXXY3NqE+gGUNmmPTbhn8tjzcCtSqhFgIeqef81ngny8JM25hw==", + "dependencies": { + "@types/cookie": "^0.4.1", + "@types/cors": "^2.8.12", + "@types/node": ">=10.0.0", + "accepts": "~1.3.4", + "base64id": "2.0.0", + "cookie": "~0.4.1", + "cors": "~2.8.5", + "debug": "~4.3.1", + "engine.io-parser": "~5.0.3", + "ws": "~8.11.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/engine.io-parser": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.6.tgz", + "integrity": "sha512-tjuoZDMAdEhVnSFleYPCtdL2GXwVTGtNjoeJd9IhIG3C1xs9uwxqRNEu5WpnDZCaozwVlK/nuQhpodhXSIMaxw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/engine.io/node_modules/cookie": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", + "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": 
"https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "engines": [ + "node >=0.6.0" + ] + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + 
"integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/generic-pool": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz", + "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==", + "engines": { + "node": ">= 4" + 
} + }, + "node_modules/get-intrinsic": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", + "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ip6addr": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/ip6addr/-/ip6addr-0.2.5.tgz", + "integrity": "sha512-9RGGSB6Zc9Ox5DpDGFnJdIeF0AsqXzdH+FspCfPPaU/L/4tI6P+5lIoFUFm9JXs9IrJv1boqAaNCQmoDADTSKQ==", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^2.0.2" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" + }, + "node_modules/jsonwebtoken": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz", + "integrity": "sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==", + "dependencies": { + "jws": "^3.2.2", + "lodash": "^4.17.21", + "ms": "^2.1.1", + "semver": "^7.3.8" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsprim": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz", + "integrity": "sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ==", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + } + }, + "node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": 
"sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/map-obj": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", + "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/maxmind": { + "version": "4.3.8", + "resolved": "https://registry.npmjs.org/maxmind/-/maxmind-4.3.8.tgz", + "integrity": "sha512-HrfxEu5yPBPtTy/OT+W5bPQwEfLUX0EHqe2EbJiB47xQMumHqXvSP7PAwzV8Z++NRCmQwy4moQrTSt0+dH+Jmg==", + "dependencies": { + "mmdb-lib": "2.0.2", + "tiny-lru": "9.0.3" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": 
"sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mmdb-lib": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mmdb-lib/-/mmdb-lib-2.0.2.tgz", + "integrity": "sha512-shi1I+fCPQonhTi7qyb6hr7hi87R7YS69FlfJiMFuJ12+grx0JyL56gLNzGTYXPU7EhAPkMLliGeyHer0K+AVA==", + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/notepack.io": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-3.0.1.tgz", + "integrity": "sha512-TKC/8zH5pXIAMVQio2TvVDTtPRX+DJPHDqjRbxogtFiByHyzKmy96RA0JtCQJ+WouyyL4A10xomQzgbUT+1jCg==" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + 
"version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": 
">= 0.8" + } + }, + "node_modules/redis": { + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/redis/-/redis-4.6.4.tgz", + "integrity": "sha512-wi2tgDdQ+Q8q+PR5FLRx4QvDiWaA+PoJbrzsyFqlClN5R4LplHqN3scs/aGjE//mbz++W19SgxiEnQ27jnCRaA==", + "dependencies": { + "@redis/bloom": "1.2.0", + "@redis/client": "1.5.5", + "@redis/graph": "1.1.0", + "@redis/json": "1.0.4", + "@redis/search": "1.1.1", + "@redis/time-series": "1.0.4" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/semver": { + "version": "7.3.8", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", + "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": 
"1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + 
"funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/socket.io": { + "version": "4.6.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.1.tgz", + "integrity": "sha512-KMcaAi4l/8+xEjkRICl6ak8ySoxsYG+gG6/XfRCPJPQ/haCRIJBTL4wIl8YCsmtaBovcAXGLOShyVWQ/FG8GZA==", + "dependencies": { + "accepts": "~1.3.4", + "base64id": "~2.0.0", + "debug": "~4.3.2", + "engine.io": "~6.4.1", + "socket.io-adapter": "~2.5.2", + "socket.io-parser": "~4.2.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/socket.io-adapter": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz", + "integrity": "sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA==", + "dependencies": { + "ws": "~8.11.0" + } + }, + "node_modules/socket.io-parser": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.2.tgz", + "integrity": "sha512-DJtziuKypFkMMHCm2uIshOYC7QaylbtzQwiMYDuCKy3OPkjLzu4B2vAhTlqipRHHzrI0NJeBAizTK7X+6m1jVw==", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.3.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/tiny-lru": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-9.0.3.tgz", + "integrity": "sha512-/i9GruRjXsnDgehxvy6iZ4AFNVxngEFbwzirhdulomMNPGPVV3ECMZOWSw0w4sRMZ9Al9m4jy08GPvRxRUGYlw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": 
"sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ua-parser-js": { + "version": "1.0.33", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.33.tgz", + "integrity": "sha512-RqshF7TPTE0XLYAqmjlu5cLLuGdKrNu9O1KLA/qp39QtbZwuzwv1dT46DZSopoUMsYgXpB3Cv8a03FI8b74oFQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/ua-parser-js" + }, + { + "type": "paypal", + "url": "https://paypal.me/faisalman" + } + ], + "engines": { + "node": "*" + } + }, + "node_modules/uid2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/uid2/-/uid2-1.0.0.tgz", + "integrity": "sha512-+I6aJUv63YAcY9n4mQreLUt0d4lvwkkopDNmpomkAUz0fAkEMV9pRWxN0EjhW1YfRhcuyHg2v3mwddCDW1+LFQ==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uWebSockets.js": { + "version": "20.19.0", + "resolved": "git+ssh://git@github.com/uNetworking/uWebSockets.js.git#42c9c0d5d31f46ca4115dc75672b0037ec970f28" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/ws": { + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", + "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + } + } +} diff --git a/ee/utilities/package.json b/ee/utilities/package.json index 2f61c6a95..3fcedf03b 100644 --- a/ee/utilities/package.json +++ b/ee/utilities/package.json @@ -1,5 +1,5 @@ { - "name": "utilities-server", + "name": 
"assist-server", "version": "1.0.0", "description": "assist server to get live sessions & sourcemaps reader to get stack trace", "main": "peerjs-server.js", diff --git a/ee/utilities/run-dev.sh b/ee/utilities/run-dev.sh new file mode 100755 index 000000000..00e8d5a4b --- /dev/null +++ b/ee/utilities/run-dev.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -a +source .env +set +a + +npm start \ No newline at end of file diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index 6aa2bade5..e129bfcb6 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -24,7 +24,7 @@ const { const {createAdapter} = require("@socket.io/redis-adapter"); const {createClient} = require("redis"); const wsRouter = express.Router(); -const REDIS_URL = process.env.REDIS_URL || "redis://localhost:6379"; +const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://'); const pubClient = createClient({url: REDIS_URL}); const subClient = pubClient.duplicate(); console.log(`Using Redis: ${REDIS_URL}`); @@ -309,7 +309,8 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - await io.of('/').adapter.remoteJoin(socket.id, socket.peerId); + // await io.of('/').adapter.join(socket.id, socket.peerId); + await socket.join(socket.peerId); let rooms = await io.of('/').adapter.allRooms(); if (rooms.has(socket.peerId)) { let connectedSockets = await io.in(socket.peerId).fetchSockets(); diff --git a/ee/utilities/servers/websocket.js b/ee/utilities/servers/websocket.js index bf65789f2..c906b5987 100644 --- a/ee/utilities/servers/websocket.js +++ b/ee/utilities/servers/websocket.js @@ -287,7 +287,7 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - socket.join(socket.peerId); + await 
socket.join(socket.peerId); if (io.sockets.adapter.rooms.get(socket.peerId)) { debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`); } diff --git a/frontend/.env.sample b/frontend/.env.sample index d5337deee..4b6cface2 100644 --- a/frontend/.env.sample +++ b/frontend/.env.sample @@ -22,5 +22,5 @@ MINIO_ACCESS_KEY = '' MINIO_SECRET_KEY = '' # APP and TRACKER VERSIONS -VERSION = '1.9.0' -TRACKER_VERSION = '4.1.9' +VERSION = '1.10.0' +TRACKER_VERSION = '5.0.0' diff --git a/frontend/app/components/Assist/Assist.tsx b/frontend/app/components/Assist/Assist.tsx index abb1403a9..b58df1352 100644 --- a/frontend/app/components/Assist/Assist.tsx +++ b/frontend/app/components/Assist/Assist.tsx @@ -6,6 +6,7 @@ import AssistRouter from './AssistRouter'; import { SideMenuitem } from 'UI'; import { withSiteId, assist, recordings } from 'App/routes'; import { connect } from 'react-redux'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; interface Props extends RouteComponentProps { siteId: string; @@ -40,7 +41,7 @@ function Assist(props: Props) { iconName="record-circle" onClick={() => redirect(recordings())} disabled={!isEnterprise} - tooltipTitle="This feature requires an enterprise license." + tooltipTitle={ENTERPRISE_REQUEIRED} />
diff --git a/frontend/app/components/Client/Integrations/IntegrationItem.tsx b/frontend/app/components/Client/Integrations/IntegrationItem.tsx index 06e950ae6..efcdefd8a 100644 --- a/frontend/app/components/Client/Integrations/IntegrationItem.tsx +++ b/frontend/app/components/Client/Integrations/IntegrationItem.tsx @@ -13,7 +13,7 @@ interface Props { const IntegrationItem = (props: Props) => { const { integration, integrated, hide = false } = props; return hide ? <> : ( -
props.onClick(e)}> +
props.onClick(e)}> {integrated && (
diff --git a/frontend/app/components/Client/Integrations/Integrations.tsx b/frontend/app/components/Client/Integrations/Integrations.tsx index f1851f919..30d901dcf 100644 --- a/frontend/app/components/Client/Integrations/Integrations.tsx +++ b/frontend/app/components/Client/Integrations/Integrations.tsx @@ -99,7 +99,7 @@ function Integrations(props: Props) {
{cat.description}
-
+
{cat.integrations.map((integration: any) => ( ({ @@ -61,9 +62,14 @@ export default class NewSiteForm extends React.PureComponent { return this.setState({ existsError: true }); } if (site.exists()) { - this.props.update(this.props.site, this.props.site.id).then(() => { - this.props.onClose(null); - this.props.fetchList(); + this.props.update(this.props.site, this.props.site.id).then((response) => { + if (!response || !response.errors || response.errors.size === 0) { + this.props.onClose(null); + this.props.fetchList(); + toast.success('Project updated successfully'); + } else { + toast.error(response.errors[0]); + } }); } else { this.props.save(this.props.site).then(() => { diff --git a/frontend/app/components/Client/Webhooks/WebhookForm.js b/frontend/app/components/Client/Webhooks/WebhookForm.js index 62f009f1e..08799456f 100644 --- a/frontend/app/components/Client/Webhooks/WebhookForm.js +++ b/frontend/app/components/Client/Webhooks/WebhookForm.js @@ -1,75 +1,86 @@ import React from 'react'; import { Form, Button, Input } from 'UI'; import styles from './webhookForm.module.css'; -import { useStore } from 'App/mstore' -import { observer } from 'mobx-react-lite' +import { useStore } from 'App/mstore'; +import { observer } from 'mobx-react-lite'; +import { toast } from 'react-toastify'; function WebhookForm(props) { - const { settingsStore } = useStore() - const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore - const write = ({ target: { value, name } }) => editWebhook({ [name]: value }); + const { settingsStore } = useStore(); + const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore; + const write = ({ target: { value, name } }) => editWebhook({ [name]: value }); - const save = () => { - saveWebhook(webhook).then(() => { - props.onClose(); - }); - }; + const save = () => { + saveWebhook(webhook) + .then(() => { + props.onClose(); + }) + .catch((e) => { + const baseStr = 'Error saving 
webhook'; + if (e.response) { + e.response.json().then(({ errors }) => { + toast.error(baseStr + ': ' + errors.join(',')); + }); + } else { + toast.error(baseStr); + } + }); + }; + return ( +
+

{webhook.exists() ? 'Update' : 'Add'} Webhook

+
+ + + + - return ( -
-

{webhook.exists() ? 'Update' : 'Add'} Webhook

- - - - - + + + + - - - - + + + + - - - - - -
-
- - {webhook.exists() && } -
- {webhook.exists() && - } -
- +
+
+ + {webhook.exists() && } +
+ {webhook.exists() && ( + + )}
- ); + +
+ ); } export default observer(WebhookForm); diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx index 5039cc1dd..80a900895 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx @@ -2,6 +2,7 @@ import React from 'react'; import { Input } from 'UI'; import Select from 'Shared/Select'; import { alertConditions as conditions } from 'App/constants'; +import Alert from 'Types/alert' const thresholdOptions = [ { label: '15 minutes', value: 15 }, @@ -25,6 +26,7 @@ interface ICondition { writeQuery: (data: any) => void; writeQueryOption: (e: any, data: any) => void; unit: any; + changeUnit: (value: string) => void; } function Condition({ @@ -35,6 +37,7 @@ function Condition({ writeQueryOption, writeQuery, unit, + changeUnit, }: ICondition) { return (
@@ -47,7 +50,7 @@ function Condition({ options={changeOptions} name="change" defaultValue={instance.change} - onChange={({ value }) => writeOption(null, { name: 'change', value })} + onChange={({ value }) => changeUnit(value)} id="change-dropdown" />
diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index e3412bdce..8137b7750 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -8,6 +8,7 @@ import { DateTime } from 'luxon'; import { withRouter, RouteComponentProps } from 'react-router-dom'; import cn from 'classnames'; import Alert from 'Types/alert'; +import { observer } from 'mobx-react-lite' const getThreshold = (threshold: number) => { if (threshold === 15) return '15 Minutes'; @@ -42,9 +43,8 @@ const getNotifyChannel = (alert: Record, webhooks: Array) => { ' (' + alert.msteamsInput .map((channelId: number) => { - return ( - webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams')?.name - ); + return webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams') + ?.name; }) .join(', ') + ')' @@ -58,7 +58,7 @@ const getNotifyChannel = (alert: Record, webhooks: Array) => { } } if (alert.msteams) { - str += (str === '' ? '' : ' and ') + 'MS Teams' + str += (str === '' ? '' : ' and ') + 'MS Teams'; if (alert.msteamsInput.length > 0) { str += getMsTeamsChannels(); } @@ -79,10 +79,11 @@ interface Props extends RouteComponentProps { init: (alert: Alert) => void; demo?: boolean; webhooks: Array; + triggerOptions: Record; } function AlertListItem(props: Props) { - const { alert, siteId, history, init, demo, webhooks } = props; + const { alert, siteId, history, init, demo, webhooks, triggerOptions } = props; if (!alert) { return null; @@ -95,6 +96,11 @@ function AlertListItem(props: Props) { history.push(path); }; + const formTriggerName = () => + Number.isInteger(alert.query.left) && triggerOptions + ? triggerOptions.find((opt: { value: any, label: string }) => opt.value === alert.query.left).label + : alert.query.left; + return (
{'When the '} - {alert.detectionMethod} + + {alert.detectionMethod} + {' of '} - {alert.seriesName} + + {triggerOptions ? formTriggerName() : alert.seriesName} + {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)} {alert.metric?.unit} + {numberWithCommas(alert.query.right)} + {alert.change === 'percent' ? '%' : alert.metric?.unit} {' over the past '} - {getThreshold( - alert.currentPeriod)} + + {getThreshold(alert.currentPeriod)} + {alert.detectionMethod === 'change' ? ( <> {' compared to the previous '} - {getThreshold( - alert.previousPeriod)} + + {getThreshold(alert.previousPeriod)} + ) : null} {', notify me on '} @@ -153,4 +166,4 @@ function AlertListItem(props: Props) { ); } -export default withRouter(AlertListItem); +export default withRouter(observer(AlertListItem)); diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx index e4005098e..d1d4c84ef 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx @@ -17,10 +17,10 @@ function AlertsList({ siteId }: Props) { const { alertsStore, settingsStore } = useStore(); const { fetchWebhooks, webhooks } = settingsStore const { alerts: alertsList, alertsSearch, fetchList, init } = alertsStore + const page = alertsStore.page; React.useEffect(() => { fetchList(); fetchWebhooks() }, []); const alertsArray = alertsList - const [page, setPage] = React.useState(1); const filteredAlerts = filterList(alertsArray, alertsSearch, ['name'], (item, query) => query.test(item.query.left)) const list = alertsSearch !== '' ? 
filteredAlerts : alertsArray; @@ -59,7 +59,7 @@ function AlertsList({ siteId }: Props) { setPage(page)} + onPageChange={(page) => alertsStore.updateKey('page', page)} limit={pageSize} debounceRequest={100} /> diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx index 631df8e43..544c86f8f 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx @@ -1,16 +1,30 @@ -import React from 'react'; +import React, { useEffect } from 'react'; import { Button, PageTitle, Icon, Link } from 'UI'; import withPageTitle from 'HOCs/withPageTitle'; import { withSiteId, alertCreate } from 'App/routes'; import AlertsList from './AlertsList'; import AlertsSearch from './AlertsSearch'; +import { useHistory } from 'react-router'; +import { useStore } from 'App/mstore'; interface IAlertsView { siteId: string; } function AlertsView({ siteId }: IAlertsView) { + const history = useHistory(); + const { alertsStore } = useStore(); + + + useEffect(() => { + const unmount = history.listen((location) => { + if (!location.pathname.includes('/alert')) { + alertsStore.updateKey('page', 1); + } + }); + return unmount; + }, [history]); return (
diff --git a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx index 717c7ea59..4d1d247b0 100644 --- a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx @@ -167,6 +167,10 @@ const NewAlert = (props: IProps) => { edit({ query: { ...query, [name]: value } }); }; + const changeUnit = (value: string) => { + alertsStore.changeUnit(value) + } + const writeQuery = ({ target: { value, name } }: React.ChangeEvent) => { const { query } = instance; edit({ query: { ...query, [name]: value } }); @@ -243,6 +247,7 @@ const NewAlert = (props: IProps) => { instance={instance} triggerOptions={triggerOptions} writeQueryOption={writeQueryOption} + changeUnit={changeUnit} writeQuery={writeQuery} unit={unit} /> @@ -278,7 +283,13 @@ const NewAlert = (props: IProps) => {
{instance && ( - null} webhooks={webhooks} /> + null} + webhooks={webhooks} /> )}
diff --git a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx index 62cfd9404..63b1c3f35 100644 --- a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx +++ b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx @@ -1,6 +1,7 @@ import React from 'react'; import { ItemMenu } from 'UI'; import { connect } from 'react-redux'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; interface Props { editHandler: (isTitle: boolean) => void; @@ -16,7 +17,7 @@ function DashboardOptions(props: Props) { { icon: 'text-paragraph', text: `${!isTitlePresent ? 'Add' : 'Edit'} Description`, onClick: () => editHandler(false) }, { icon: 'users', text: 'Visibility & Access', onClick: editHandler }, { icon: 'trash', text: 'Delete', onClick: deleteHandler }, - { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: 'This feature requires an enterprise license.' } + { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: ENTERPRISE_REQUEIRED } ] return ( diff --git a/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx b/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx index 0d5fc4c89..c28389c4a 100644 --- a/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx +++ b/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx @@ -2,6 +2,7 @@ import { IconNames } from 'App/components/ui/SVG'; import React from 'react'; import { Icon, Tooltip } from 'UI'; import cn from 'classnames'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; export interface MetricType { title: string; @@ -23,7 +24,7 @@ function MetricTypeItem(props: Props) { onClick = () => {}, } = props; return ( - +
dashboard?.widgets?.map(i => parseInt(i.metricId)), [dashboard]); - const cards = useMemo(() => metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))), [metricStore.filteredCards]); + const cards = useMemo(() => !!onSelectionChange ? metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))) : metricStore.filteredCards, [metricStore.filteredCards]); useEffect(() => { metricStore.fetchList(); diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index 4052e7a7e..3720dd94b 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -1,5 +1,5 @@ import React, { useEffect, useState } from 'react'; -import { NoContent, Loader, Pagination } from 'UI'; +import { NoContent, Loader, Pagination, Button } from 'UI'; import Select from 'Shared/Select'; import cn from 'classnames'; import { useStore } from 'App/mstore'; @@ -10,168 +10,203 @@ import { debounce } from 'App/utils'; import useIsMounted from 'App/hooks/useIsMounted'; import AnimatedSVG, { ICONS } from 'Shared/AnimatedSVG/AnimatedSVG'; import { numberWithCommas } from 'App/utils'; -import { CLICKMAP } from "App/constants/card"; +import { CLICKMAP } from 'App/constants/card'; interface Props { - className?: string; + className?: string; } function WidgetSessions(props: Props) { - const { className = '' } = props; - const [activeSeries, setActiveSeries] = useState('all'); - const [data, setData] = useState([]); - const isMounted = useIsMounted(); - const [loading, setLoading] = useState(false); - const filteredSessions = getListSessionsBySeries(data, activeSeries); - const { dashboardStore, metricStore, sessionStore } = useStore(); - const filter = dashboardStore.drillDownFilter; - const widget = metricStore.instance; - 
const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); + const { className = '' } = props; + const [activeSeries, setActiveSeries] = useState('all'); + const [data, setData] = useState([]); + const isMounted = useIsMounted(); + const [loading, setLoading] = useState(false); + const filteredSessions = getListSessionsBySeries(data, activeSeries); + const { dashboardStore, metricStore, sessionStore } = useStore(); + const filter = dashboardStore.drillDownFilter; + const widget = metricStore.instance; + const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); + const hasFilters = filter.filters.length > 0 || (filter.startTimestamp !== dashboardStore.drillDownPeriod.start || filter.endTimestamp !== dashboardStore.drillDownPeriod.end); - const writeOption = ({ value }: any) => setActiveSeries(value.value); - useEffect(() => { - if (!data) return; - const seriesOptions = data.map((item: any) => ({ - label: item.seriesName, - value: item.seriesId, - })); - setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); - }, [data]); + const writeOption = ({ value }: any) => setActiveSeries(value.value); + useEffect(() => { + if (!data) return; + const seriesOptions = data.map((item: any) => ({ + label: item.seriesName, + value: item.seriesId, + })); + setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); + }, [data]); - const fetchSessions = (metricId: any, filter: any) => { - if (!isMounted()) return; - setLoading(true); - delete filter.eventsOrderSupport; - widget - .fetchSessions(metricId, filter) - .then((res: any) => { - 
setData(res); - }) - .finally(() => { - setLoading(false); - }); - }; - const fetchClickmapSessions = (customFilters: Record) => { - sessionStore.getSessions(customFilters) - .then(data => { - setData([{ ...data, seriesId: 1 , seriesName: "Clicks" }]) - }) + const fetchSessions = (metricId: any, filter: any) => { + if (!isMounted()) return; + setLoading(true); + delete filter.eventsOrderSupport; + widget + .fetchSessions(metricId, filter) + .then((res: any) => { + setData(res); + }) + .finally(() => { + setLoading(false); + }); + }; + const fetchClickmapSessions = (customFilters: Record) => { + sessionStore.getSessions(customFilters).then((data) => { + setData([{ ...data, seriesId: 1, seriesName: 'Clicks' }]); + }); + }; + const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); + const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []); + + const depsString = JSON.stringify(widget.series); + + const loadData = () => { + if (widget.metricType === CLICKMAP && metricStore.clickMapSearch) { + const clickFilter = { + value: [metricStore.clickMapSearch], + type: 'CLICK', + operator: 'onSelector', + isEvent: true, + // @ts-ignore + filters: [], + }; + const timeRange = { + rangeValue: dashboardStore.drillDownPeriod.rangeValue, + startDate: dashboardStore.drillDownPeriod.start, + endDate: dashboardStore.drillDownPeriod.end, + }; + const customFilter = { + ...filter, + ...timeRange, + filters: [...sessionStore.userFilter.filters, clickFilter], + }; + debounceClickMapSearch(customFilter); + } else { + debounceRequest(widget.metricId, { + ...filter, + series: widget.series.map((s) => s.toJson()), + page: metricStore.sessionsPage, + limit: metricStore.sessionsPageSize, + }); } - const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); - const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []) + }; + useEffect(() => { + metricStore.updateKey('sessionsPage', 
1); + loadData(); + }, [ + filter.startTimestamp, + filter.endTimestamp, + filter.filters, + depsString, + metricStore.clickMapSearch, + activeSeries, + ]); + useEffect(loadData, [metricStore.sessionsPage]); - const depsString = JSON.stringify(widget.series); - - const loadData = () => { - if (widget.metricType === CLICKMAP && metricStore.clickMapSearch) { - const clickFilter = { - value: [ - metricStore.clickMapSearch - ], - type: "CLICK", - operator: "onSelector", - isEvent: true, - // @ts-ignore - "filters": [] - } - const timeRange = { - rangeValue: dashboardStore.drillDownPeriod.rangeValue, - startDate: dashboardStore.drillDownPeriod.start, - endDate: dashboardStore.drillDownPeriod.end, - } - const customFilter = { ...filter, ...timeRange, filters: [ ...sessionStore.userFilter.filters, clickFilter]} - debounceClickMapSearch(customFilter) - } else { - debounceRequest(widget.metricId, { - ...filter, - series: widget.series.map(s => s.toJson()), - page: metricStore.sessionsPage, - limit: metricStore.sessionsPageSize, - }); - } - } - useEffect(() => { - metricStore.updateKey('sessionsPage', 1); - loadData(); - }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch]); - useEffect(loadData, [metricStore.sessionsPage]); + const clearFilters = () => { + metricStore.updateKey('sessionsPage', 1); + dashboardStore.resetDrillDownFilter(); + } - return ( -
-
-
-

{metricStore.clickMapSearch ? 'Clicks' : 'Sessions'}

-
- {metricStore.clickMapLabel ? `on "${metricStore.clickMapLabel}" ` : null} - between {startTime} and{' '} - {endTime}{' '} -
-
- - {widget.metricType !== 'table' && widget.metricType !== CLICKMAP && ( -
- Filter by Series - +
+ )} +
+
+ +
+ + + +
+
+ No relevant sessions found for the selected time period. +
+
+ } + show={filteredSessions.sessions.length === 0} + > + {filteredSessions.sessions.map((session: any) => ( + + +
+ + ))} + +
+
+ Showing{' '} + + {(metricStore.sessionsPage - 1) * metricStore.sessionsPageSize + 1} + {' '} + to{' '} + + {(metricStore.sessionsPage - 1) * metricStore.sessionsPageSize + + filteredSessions.sessions.length} + {' '} + of {numberWithCommas(filteredSessions.total)}{' '} + sessions. +
+ metricStore.updateKey('sessionsPage', page)} + limit={metricStore.sessionsPageSize} + debounceRequest={500} + /> +
+ + +
+
+ ); } const getListSessionsBySeries = (data: any, seriesId: any) => { - const arr: any = { sessions: [], total: 0 }; - data.forEach((element: any) => { - if (seriesId === 'all') { - const sessionIds = arr.sessions.map((i: any) => i.sessionId); - arr.sessions.push(...element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId))); - arr.total = element.total; - } else { - if (element.seriesId === seriesId) { - arr.sessions.push(...element.sessions); - arr.total = element.total; - } - } - }); - return arr; + const arr = data.reduce( + (arr: any, element: any) => { + if (seriesId === 'all') { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + arr.sessions.push(...sessions); + } else if (element.seriesId === seriesId) { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + const duplicates = element.sessions.length - sessions.length; + arr.sessions.push(...sessions); + arr.total = element.total - duplicates; + } + return arr; + }, + { sessions: [] } + ); + arr.total = + seriesId === 'all' + ? Math.max(...data.map((i: any) => i.total)) + : data.find((i: any) => i.seriesId === seriesId).total; + return arr; }; export default observer(WidgetSessions); diff --git a/frontend/app/components/Login/Login.js b/frontend/app/components/Login/Login.js index 34ca11de5..da2206bdf 100644 --- a/frontend/app/components/Login/Login.js +++ b/frontend/app/components/Login/Login.js @@ -10,6 +10,7 @@ import stl from './login.module.css'; import cn from 'classnames'; import { setJwt } from 'Duck/user'; import LoginBg from '../../svg/login-illustration.svg'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; const FORGOT_PASSWORD = forgotPassword(); const SIGNUP_ROUTE = signup(); @@ -165,7 +166,7 @@ class Login extends React.Component { ) : ( {authDetails.edition === 'ee' ? 
"SSO has not been configured. Please reach out to your admin." : "This feature requires an enterprise license."}
} + title={
{authDetails.edition === 'ee' ? "SSO has not been configured. Please reach out to your admin." : ENTERPRISE_REQUEIRED}
} placement="top" >