Compare commits
main...player-ref
6 commits

| Author | SHA1 | Date |
|---|---|---|
| | 211852bafe | |
| | 2a7f9d6cd2 | |
| | ecf4e0e8a2 | |
| | 607047f022 | |
| | 5a966ca3de | |
| | d6cac3bfda | |
5543 changed files with 177847 additions and 337006 deletions
74  .github/composite-actions/update-keys/action.yml  (vendored)

@@ -1,74 +0,0 @@
name: 'Update Keys'
description: 'Updates keys'
inputs:
domain_name:
required: true
description: 'Domain Name'
license_key:
required: true
description: 'License Key'
jwt_secret:
required: true
description: 'JWT Secret'
jwt_spot_secret:
required: true
description: 'JWT spot Secret'
minio_access_key:
required: true
description: 'MinIO Access Key'
minio_secret_key:
required: true
description: 'MinIO Secret Key'
pg_password:
required: true
description: 'PostgreSQL Password'
registry_url:
required: true
description: 'Registry URL'

runs:
using: "composite"
steps:
- name: Downloading yq
run: |
VERSION="v4.42.1"
sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
sudo chmod +x /usr/bin/yq
shell: bash

- name: "Updating OSS secrets"
run: |
cd scripts/helmcharts/
vars=(
"ASSIST_JWT_SECRET:.global.assistJWTSecret"
"ASSIST_KEY:.global.assistKey"
"DOMAIN_NAME:.global.domainName"
"JWT_REFRESH_SECRET:.chalice.env.JWT_REFRESH_SECRET"
"JWT_SECRET:.global.jwtSecret"
"JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
"JWT_SPOT_SECRET:.global.jwtSpotSecret"
"LICENSE_KEY:.global.enterpriseEditionLicense"
"MINIO_ACCESS_KEY:.global.s3.accessKey"
"MINIO_SECRET_KEY:.global.s3.secretKey"
"PG_PASSWORD:.postgresql.postgresqlPassword"
"REGISTRY_URL:.global.openReplayContainerRegistry"
)
for var in "${vars[@]}"; do
IFS=":" read -r env_var yq_path <<<"$var"
yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done
shell: bash
env:
ASSIST_JWT_SECRET: ${{ inputs.assist_jwt_secret }}
ASSIST_KEY: ${{ inputs.assist_key }}
DOMAIN_NAME: ${{ inputs.domain_name }}
JWT_REFRESH_SECRET: ${{ inputs.jwt_refresh_secret }}
JWT_SECRET: ${{ inputs.jwt_secret }}
JWT_SPOT_REFRESH_SECRET: ${{inputs.jwt_spot_refresh_secret}}
JWT_SPOT_SECRET: ${{ inputs.jwt_spot_secret }}
LICENSE_KEY: ${{ inputs.license_key }}
MINIO_ACCESS_KEY: ${{ inputs.minio_access_key }}
MINIO_SECRET_KEY: ${{ inputs.minio_secret_key }}
PG_PASSWORD: ${{ inputs.pg_password }}
REGISTRY_URL: ${{ inputs.registry_url }}
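For context, this deleted composite action centralizes secret injection into scripts/helmcharts/vars.yaml with yq (v4's strenv) instead of per-workflow sed edits. A minimal local sketch of the same pattern, assuming a vars.yaml with the paths listed above; the exported value here is purely illustrative:

# Sketch: inject exported environment variables into vars.yaml the way the action's loop does.
export DOMAIN_NAME="example.openreplay.local"   # hypothetical value for illustration
for var in "DOMAIN_NAME:.global.domainName" "PG_PASSWORD:.postgresql.postgresqlPassword"; do
  IFS=":" read -r env_var yq_path <<<"$var"       # split "ENV_VAR:.yq.path"
  yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done

Each calling workflow is expected to export the corresponding secrets as environment variables (as the env: block above does) before the loop runs.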
12  .github/dependabot.yaml  (vendored)

@@ -1,12 +0,0 @@
version: 2
updates:
- package-ecosystem: "npm"
directory: "/"
schedule:
interval: "weekly"
target-branch: "dev"
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
target-branch: "dev"
72  .github/workflows/alerts-ee.yaml  (vendored)

@@ -3,13 +3,12 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
- api-v1.10.0
paths:
- "ee/api/**"
- "api/**"

@@ -26,6 +25,7 @@ on:
- "!ee/api/requirements.txt"
- "!ee/api/requirements-crons.txt"

name: Build and Deploy Alerts EE

jobs:

@@ -41,25 +41,9 @@ jobs:
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

- uses: azure/k8s-set-context@v1
with:

@@ -72,6 +56,7 @@ jobs:
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing api image
id: build-image
env:

@@ -83,10 +68,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build_alerts.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("alerts")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

@@ -106,9 +91,9 @@ jobs:
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))

@@ -123,16 +108,24 @@ jobs:
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

# Update changed image tag
sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,alerts,quickwit,connector} /tmp/charts/
mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

@@ -149,14 +142,13 @@ jobs:
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging

# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
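Both sides of the build step above use the same shell gate: the Trivy scan block runs unless the skip_security_checks input is "true", and a failing scan stops the job before the image is pushed. A condensed sketch of that gate, with placeholder registry, image, and tag values (the real workflow wires these from secrets and github.sha):

# Sketch of the scan-or-skip gate used in these workflows (placeholder values, simplified error handling).
skip_security_checks="false"                 # in CI this comes from the workflow_dispatch input
DOCKER_REPO="registry.example.com/foss"      # placeholder
IMAGE_TAG="dev_0000000"                      # placeholder
[[ "x$skip_security_checks" == "xtrue" ]] || {
  curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
  ./trivy image --exit-code 1 --severity "HIGH,CRITICAL" --ignore-unfixed "$DOCKER_REPO/alerts:$IMAGE_TAG" || exit 1
}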
57  .github/workflows/alerts.yaml  (vendored)

@@ -3,13 +3,12 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
- api-v1.10.0
paths:
- "api/**"
- "!api/.gitignore"

@@ -34,25 +33,9 @@ jobs:
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

- uses: azure/k8s-set-context@v1
with:

@@ -65,6 +48,7 @@ jobs:
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing Alerts image
id: build-image
env:

@@ -76,10 +60,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build_alerts.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("alerts")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

@@ -99,9 +83,9 @@ jobs:
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))

@@ -115,7 +99,7 @@ jobs:
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml

@@ -123,16 +107,15 @@ jobs:
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

# Update changed image tag
sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,alerts,quickwit,connector} /tmp/charts/
mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}

@@ -148,13 +131,13 @@ jobs:
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
210  .github/workflows/api-ee.yaml  (vendored)

@@ -3,13 +3,12 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
- api-v1.10.0
paths:
- "ee/api/**"
- "api/**"

@@ -23,6 +22,7 @@ on:
- "!ee/api/*-dev.sh"
- "!ee/api/requirements-*.txt"

name: Build and Deploy Chalice EE

jobs:

@@ -31,129 +31,121 @@ jobs:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("chalice")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml
echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done

- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

# Update changed image tag
sed -i "/chalice/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,chalice,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# Update changed image tag
sed -i "/chalice/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
#
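For reference, the "Creating old image input" step that these workflows share can be read as a standalone script: it records the image tags currently running in the app namespace and turns them into a Helm values override. A condensed sketch, assuming kubectl access to the same cluster and images hosted under a /foss/ registry path as in the workflows:

# Sketch: derive currently deployed image tags and emit a Helm override file (non-EE variant, no -ee stripping).
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in $(cat /tmp/image_tag.txt); do
  image_array=($(echo "$line" | tr ':' '\n'))     # split "name:tag" into an array
  cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    tag: ${image_array[1]}
EOF
done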
215  .github/workflows/api.yaml  (vendored)

@@ -3,13 +3,12 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
- api-v1.10.0
paths:
- "api/**"
- "!api/.gitignore"

@@ -25,126 +24,118 @@ jobs:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("chalice")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml
echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done

- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,chalice,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
190  .github/workflows/assist-ee.yaml  (vendored)

@@ -1,19 +1,14 @@
# This action will push the assist changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
paths:
- "ee/assist/**"
- "assist/**"
- "!assist/.gitignore"
- "!assist/*-dev.sh"
- "ee/utilities/**"
- "utilities/**"
- "!utilities/.gitignore"
- "!utilities/*-dev.sh"

name: Build and Deploy Assist EE

@@ -23,112 +18,103 @@ jobs:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd utilities
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml
echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true
#
122  .github/workflows/assist-server-ee.yaml  (vendored)

@@ -1,122 +0,0 @@
# This action will push the assist changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
paths:
- "ee/assist-server/**"

name: Build and Deploy Assist-Server EE

jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

- name: Building and Pushing Assist-Server image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist-server
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-server")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist-server")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
pwd
cd scripts/helmcharts/

# Update changed image tag
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
162  .github/workflows/assist-stats.yaml  (vendored)

@@ -1,162 +0,0 @@
# This action will push the assist-stats changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
paths:
- "assist-stats/**"
- "!assist-stats/.gitignore"
- "!assist-stats/*-dev.sh"
- "!assist-stats/requirements-*.txt"

name: Build and Deploy Assist Stats ee

jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing assist-stats image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist-stats
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-stats")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist-stats")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done

### Enterprise code deployment

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontextee

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
assist-stats:
image:
# We've to strip off the -ee, as helm will append it.
tag: ${IMAGE_TAG}
EOF

export IMAGE_TAG=${IMAGE_TAG}
# Update changed image tag
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-stats,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"

# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
190
.github/workflows/assist.yaml
vendored
190
.github/workflows/assist.yaml
vendored
|
|
@ -1,18 +1,13 @@
|
|||
# This action will push the assist changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
required: false
|
||||
default: "false"
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
paths:
|
||||
- "assist/**"
|
||||
- "!assist/.gitignore"
|
||||
- "!assist/*-dev.sh"
|
||||
- "utilities/**"
|
||||
- "!utilities/.gitignore"
|
||||
- "!utilities/*-dev.sh"
|
||||
|
||||
name: Build and Deploy Assist
|
||||
|
||||
|
|
@ -22,112 +17,103 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Building and Pushing Assist image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd assist
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("assist")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
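For reference, a minimal sketch of the skip-flag gate used by this build step, written as an explicit if/else; the Trivy version, registry, image name and tag are placeholders, not values from the workflow run.
# Sketch: scan an image with Trivy unless the workflow input asks to skip it.
skip_security_checks="${skip_security_checks:-false}"
image="example.registry.io/foss/assist:main_0123abc"
if [ "$skip_security_checks" = "true" ]; then
  echo "Skipping security checks"
else
  TRIVY_VERSION="0.56.2"
  curl -L "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz" | tar -xzf - -C ./
  # Fail on HIGH/CRITICAL findings that already have a fix available.
  ./trivy image --exit-code 1 --severity HIGH,CRITICAL --ignore-unfixed "$image"
fi
An explicit branch keeps the skip message tied only to the case where the input was actually set.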
- name: Building and Pushing Assist image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd utilities
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("assist")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("assist")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
|
||||
EOF
|
||||
done
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
|
||||
EOF
|
||||
done
|
||||
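For reference, a rough sketch of what the 'Creating old image input' loop above produces: one values stanza per running service, with any build-variant suffix stripped from the tag. The input lines here are made up; in the workflow they come from the kubectl jsonpath pipeline.
# Sketch: turn "service:tag" lines into a Helm values override, stripping any
# "-ee" style suffix from the tag (the same job the cut -d '-' -f 1 above does).
printf '%s\n' 'chalice:v1.18.0' 'frontend:v1.18.0-ee' >/tmp/image_tag.txt
: >/tmp/image_override.yaml
while IFS=':' read -r service tag; do
  cat <<EOF >>/tmp/image_override.yaml
${service}:
  image:
    tag: ${tag%%-*}
EOF
done </tmp/image_tag.txt
cat /tmp/image_override.yaml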
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,assist,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
# Update changed image tag
|
||||
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
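For reference, a worked sketch of the sed expression used in the deploy step above: it finds the line matching the service name, advances three lines with n;n;n, and rewrites that line wholesale with the new tag. The demo file and tag are illustrative only.
# Sketch: what sed -i "/assist/{n;n;n;s/.*/    tag: NEW/}" does to a values file.
cat <<'EOF' >/tmp/values_demo.yaml
assist:
  image:
    repository: assist
    tag: old
EOF
IMAGE_TAG="main_0123abc"
sed -i "/assist/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/values_demo.yaml
cat /tmp/values_demo.yaml   # the fourth line now reads: tag: main_0123abc
Because the replacement keys off a fixed line offset, it silently breaks if the values layout gains or loses a line; the yq-based steps elsewhere in these workflows address keys instead of offsets and avoid that.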
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
#
|
||||
|
|
|
|||
212
.github/workflows/crons-ee.yaml
vendored
|
|
@@ -3,13 +3,12 @@ on:
|
|||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
|
||||
description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- api-*
|
||||
- api-v1.10.0
|
||||
paths:
|
||||
- "ee/api/**"
|
||||
- "api/**"
|
||||
|
|
@@ -34,126 +33,121 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing api image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd api
|
||||
PUSH_IMAGE=0 bash -x ./build_crons.sh ee
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("crons")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
- name: Building and Pushing api image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd api
|
||||
PUSH_IMAGE=0 bash -x ./build_crons.sh ee
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("crons")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
env:
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
cat <<EOF>/tmp/image_override.yaml
|
||||
image: &image
|
||||
tag: "${IMAGE_TAG}"
|
||||
utilities:
|
||||
apiCrons:
|
||||
assiststats:
|
||||
image: *image
|
||||
report:
|
||||
image: *image
|
||||
sessionsCleaner:
|
||||
image: *image
|
||||
projectsStats:
|
||||
image: *image
|
||||
fixProjectsStats:
|
||||
image: *image
|
||||
EOF
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("crons")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
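For reference, a minimal sketch of the YAML anchor trick in the heredoc above: the tag is written once under &image and every cron entry re-uses it through the *image alias. The tag value is a placeholder.
# Sketch: share one image tag across several cron entries with a YAML anchor.
IMAGE_TAG="main_0123abc"
cat <<EOF >/tmp/image_override.yaml
image: &image
  tag: "${IMAGE_TAG}"
utilities:
  apiCrons:
    report:
      image: *image
    sessionsCleaner:
      image: *image
EOF
# explode(.) resolves the alias so the shared tag is visible under each entry.
yq 'explode(.) | .utilities.apiCrons.report.image.tag' /tmp/image_override.yaml   # -> main_0123abc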
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,utilities,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
ENVIRONMENT: staging
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,utilities,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,utilities,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
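As a side note, the same helm template render used in the deploy steps above can be piped into kubectl diff to preview what would change before anything is applied; this preview variant is a suggestion, not part of the workflow.
# Sketch: dry-run the rendered manifests against the cluster before applying.
helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks \
  | kubectl diff -n app -f - || true   # kubectl diff exits non-zero when differences exist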
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: 'Build failed :bomb:'
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
#
|
||||
|
|
|
|||
57
.github/workflows/db-migrate.yaml
vendored
|
|
@@ -59,22 +59,17 @@ jobs:
|
|||
EOF
|
||||
done
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
|
||||
- name: Deploy to kubernetes foss
|
||||
if: ${{ steps.check-migration.outputs.skip_migration_oss != 'true' }}
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
|
|
@@ -120,21 +115,22 @@ jobs:
|
|||
EOF
|
||||
done
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
|
||||
- name: Resetting vars file
|
||||
run: |
|
||||
git checkout -- scripts/helmcharts/vars.yaml
|
||||
- name: Deploy to kubernetes ee
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secrets
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --atomic --set forceMigration=true --set dbMigrationUpstreamBranch=${IMAGE_TAG}
|
||||
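For reference, a minimal sketch of the migration-forcing upgrade above with an explicit timeout added; forceMigration and dbMigrationUpstreamBranch come from the command above, while the 15m timeout is an assumption.
# Sketch: the upgrade above with --timeout added; --atomic rolls back a failed upgrade.
helm upgrade --install openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --atomic --timeout 15m \
  --set forceMigration=true \
  --set "dbMigrationUpstreamBranch=${IMAGE_TAG}"
With --atomic, helm waits for the release and rolls it back if the upgrade fails or does not become ready within the timeout.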
|
|
@@ -144,13 +140,12 @@ jobs:
|
|||
IMAGE_TAG: ${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
# AWS_REGION: eu-central-1
|
||||
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
|
||||
|
||||
|
|
|
|||
130
.github/workflows/frontend-dev.yaml
vendored
|
|
@@ -1,7 +1,7 @@
|
|||
name: Frontend Dev Deployment
|
||||
name: Frontend Dev Deployment
|
||||
on: workflow_dispatch
|
||||
# Disable previous workflows for this action.
|
||||
concurrency:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }} #-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
|
@@ -9,77 +9,73 @@ jobs:
|
|||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
domain_name: ${{ secrets.DEV_DOMAIN_NAME }}
|
||||
license_key: ${{ secrets.DEV_LICENSE_KEY }}
|
||||
jwt_secret: ${{ secrets.DEV_JWT_SECRET }}
|
||||
minio_access_key: ${{ secrets.DEV_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.DEV_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.DEV_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.DEV_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.DEV_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
- name: Building and Pushing frontend image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
set -x
|
||||
cd frontend
|
||||
mv .env.sample .env
|
||||
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
|
||||
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
|
||||
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
|
||||
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
|
||||
- name: Building and Pushing frontend image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
set -x
|
||||
cd frontend
|
||||
mv .env.sample .env
|
||||
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
|
||||
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
|
||||
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
|
||||
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
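For reference, a condensed sketch of the frontend build step above: compile the assets in a throwaway Node container running as the host user, then build only the cicd stage of the Dockerfile and push both tag variants. The registry and tag are placeholders, and HOME is pointed at the workdir instead of mounting /etc/passwd as the workflow does.
# Sketch: out-of-image yarn build followed by a BuildKit multi-stage build.
DOCKER_REPO="example.registry.io/foss"
IMAGE_TAG="main_0123abc"
cd frontend
docker run --rm -u "$(id -u):$(id -g)" -e HOME=/work \
  -v "$(pwd)":/work -w /work node:20-slim \
  /bin/bash -c "yarn && yarn build"
DOCKER_BUILDKIT=1 docker build --target=cicd -t "$DOCKER_REPO/frontend:$IMAGE_TAG" .
docker tag  "$DOCKER_REPO/frontend:$IMAGE_TAG" "$DOCKER_REPO/frontend:$IMAGE_TAG-ee"
docker push "$DOCKER_REPO/frontend:$IMAGE_TAG"
docker push "$DOCKER_REPO/frontend:$IMAGE_TAG-ee"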
- name: Deploy to kubernetes foss
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
- name: Deploy to kubernetes foss
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
set -x
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
|
||||
set -x
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.DEV_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.DEV_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.DEV_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.DEV_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.DEV_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
iMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
|
|
|
|||
234
.github/workflows/frontend.yaml
vendored
|
|
@@ -1,4 +1,4 @@
|
|||
name: Frontend Foss Deployment
|
||||
name: Frontend Foss Deployment
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
|
|
@@ -7,7 +7,7 @@ on:
|
|||
paths:
|
||||
- frontend/**
|
||||
# Disable previous workflows for this action.
|
||||
concurrency:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }} #-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
|
@@ -15,145 +15,129 @@ jobs:
|
|||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
/home/runner/work/openreplay/openreplay/frontend/node_modules
|
||||
/home/runner/work/openreplay/openreplay/frontend/.yarn
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('frontend/yarn.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
- name: Building and Pushing frontend image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
set -x
|
||||
cd frontend
|
||||
mv .env.sample .env
|
||||
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
|
||||
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
|
||||
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
|
||||
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
|
||||
- name: Building and Pushing frontend image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
set -x
|
||||
cd frontend
|
||||
mv .env.sample .env
|
||||
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
|
||||
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
|
||||
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
|
||||
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
|
||||
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
|
||||
- name: Deploy to kubernetes foss
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
- name: Deploy to kubernetes foss
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
set -x
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
|
||||
set -x
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
### Enterprise code deployment
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontextee
|
||||
### Enterprise code deployment
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontextee
|
||||
|
||||
- name: Deploy to kubernetes ee
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
cat <<EOF>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
- name: Resetting vars file
|
||||
run: |
|
||||
git checkout -- scripts/helmcharts/vars.yaml
|
||||
- name: Deploy to kubernetes ee
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
cat <<EOF>/tmp/image_override.yaml
|
||||
frontend:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
# Update changed image tag
|
||||
sed -i "/frontend/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
# AWS_REGION: eu-central-1
|
||||
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
|
||||
|
|
|
|||
189
.github/workflows/patch-build-old.yaml
vendored
|
|
@@ -1,189 +0,0 @@
|
|||
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma-separated names of services to build (in lowercase).'
|
||||
required: true
|
||||
default: 'chalice,frontend'
|
||||
tag:
|
||||
description: 'Tag to update.'
|
||||
required: true
|
||||
type: string
|
||||
branch:
|
||||
description: 'Branch to build patches from. Make sure the branch is up to date with the tag, otherwise commits will be missing.'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Build Patch from old tag
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 4
|
||||
ref: ${{ github.event.inputs.tag }}
|
||||
|
||||
- name: Set Remote with GITHUB_TOKEN
|
||||
run: |
|
||||
git config --unset http.https://github.com/.extraheader
|
||||
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
|
||||
|
||||
- name: Create backup tag with timestamp
|
||||
run: |
|
||||
set -e # Exit immediately if a command exits with a non-zero status
|
||||
TIMESTAMP=$(date +%Y%m%d%H%M%S)
|
||||
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
|
||||
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
|
||||
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
|
||||
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
|
||||
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
|
||||
echo "Created backup tag: $BACKUP_TAG"
|
||||
|
||||
# Get the oldest commit date from the last 3 commits in raw format
|
||||
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
|
||||
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
|
||||
# Add 1 second to the timestamp
|
||||
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
|
||||
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
|
||||
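For reference, a minimal sketch of the safety step above: snapshot the tag under a timestamped backup name, then derive the new commit timestamp as one second after the oldest of the last three commits. The tag name is a placeholder and nothing is pushed here.
# Sketch: timestamped backup tag plus backdated-timestamp arithmetic.
set -e
INPUT_TAG="v1.2.3"
BACKUP_TAG="${INPUT_TAG}-backup-$(date +%Y%m%d%H%M%S)"
git tag "$BACKUP_TAG"                       # local safety copy; push it to keep it
# %at prints the author timestamp as a unix epoch; tail -1 picks the oldest of the three.
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
echo "backup=$BACKUP_TAG new_timestamp=$NEW_TIMESTAMP"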
|
||||
|
||||
- name: Setup yq
|
||||
uses: mikefarah/yq@master
|
||||
|
||||
# Configure AWS credentials for the first registry
|
||||
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
|
||||
uses: aws-actions/configure-aws-credentials@v1
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
|
||||
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
|
||||
id: login-ecr-arm
|
||||
run: |
|
||||
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
- name: Get HEAD Commit ID
|
||||
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
|
||||
- name: Define Branch Name
|
||||
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b $BRANCH_NAME
|
||||
working_dir=$(pwd)
|
||||
function image_version(){
|
||||
local service=$1
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
current_version=$(yq eval '.AppVersion' $chart_path)
|
||||
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
|
||||
echo $new_version
|
||||
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
|
||||
}
|
||||
function clone_msaas() {
|
||||
[ -d $MSAAS_REPO_FOLDER ] || {
|
||||
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
cd openreplay && git fetch origin && git checkout $INPUT_TAG
|
||||
git log -1
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
}
|
||||
}
|
||||
function build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
echo building managed
|
||||
clone_msaas
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/api
|
||||
else
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/$service
|
||||
fi
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
|
||||
}
|
||||
# Checking for backend images
|
||||
ls backend/cmd >> /tmp/backend.txt
|
||||
echo Services: "${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
BUILD_SCRIPT_NAME="build.sh"
|
||||
# Build FOSS
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
# Check if service is backend
|
||||
if grep -q $SERVICE /tmp/backend.txt; then
|
||||
cd backend
|
||||
foss_build_args="nil $SERVICE"
|
||||
ee_build_args="ee $SERVICE"
|
||||
else
|
||||
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
|
||||
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
version=$(image_version $SERVICE)
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
else
|
||||
build_managed $SERVICE $version
|
||||
fi
|
||||
cd $working_dir
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
|
||||
yq eval ".AppVersion = \"$version\"" -i $chart_path
|
||||
git add $chart_path
|
||||
git commit -m "Increment $SERVICE chart version"
|
||||
done
|
||||
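For reference, a worked sketch of the image_version helper in the Build step above: read AppVersion from a chart with yq and bump the last numeric field with awk. The chart file and version here are made up.
# Sketch: bump the patch component of a chart's AppVersion.
cat <<'EOF' >/tmp/Chart.yaml
apiVersion: v2
name: chalice
AppVersion: 1.22.4
EOF
current_version=$(yq eval '.AppVersion' /tmp/Chart.yaml)
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$current_version -> $new_version"      # 1.22.4 -> 1.22.5
yq eval ".AppVersion = \"$new_version\"" -i /tmp/Chart.yaml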
|
||||
- name: Change commit timestamp
|
||||
run: |
|
||||
# Convert the timestamp to a date format git can understand
|
||||
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
|
||||
echo "Setting commit date to: $NEW_DATE"
|
||||
|
||||
# Amend the commit with the new date
|
||||
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
|
||||
|
||||
# Verify the change
|
||||
git log -1 --pretty=format:"Commit now dated: %cD"
|
||||
|
||||
# git tag and push
|
||||
git tag $INPUT_TAG -f
|
||||
git push origin $INPUT_TAG -f
|
||||
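For reference, a minimal sketch of the timestamp rewrite above: convert the epoch to a date git accepts, amend the last commit so both author and committer dates use it, then force-move the tag. The epoch value and tag name are placeholders, and the push stays commented out here.
# Sketch: backdate the latest commit and re-point a tag at it.
NEW_TIMESTAMP=1700000000
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' "$NEW_TIMESTAMP")
# --date sets the author date; GIT_COMMITTER_DATE sets the committer date.
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
git log -1 --pretty=format:"Commit now dated: %cD"
git tag -f v1.2.3
# git push origin v1.2.3 -f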
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
# MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
261
.github/workflows/patch-build.yaml
vendored
|
|
@@ -1,261 +0,0 @@
|
|||
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma-separated names of services to build (in lowercase).'
|
||||
required: true
|
||||
default: 'chalice,frontend'
|
||||
|
||||
name: Build patches from main branch, Raise PR to Main, and Push to tag
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Build Patch from main
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Rebase with main branch to make sure the code has the latest main changes
|
||||
if: github.ref != 'refs/heads/main'
|
||||
run: |
|
||||
git remote -v
|
||||
git config --global user.email "action@github.com"
|
||||
git config --global user.name "GitHub Action"
|
||||
git config --global rebase.autoStash true
|
||||
git fetch origin main:main
|
||||
git rebase main
|
||||
git log -3
|
||||
|
||||
- name: Downloading yq
|
||||
run: |
|
||||
VERSION="v4.42.1"
|
||||
sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
|
||||
sudo chmod +x /usr/bin/yq
|
||||
|
||||
# Configure AWS credentials for the first registry
|
||||
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
|
||||
uses: aws-actions/configure-aws-credentials@v1
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
|
||||
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
|
||||
id: login-ecr-arm
|
||||
run: |
|
||||
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
- name: Get HEAD Commit ID
|
||||
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
|
||||
- name: Define Branch Name
|
||||
run: echo "BRANCH_NAME=patch/main/${HEAD_COMMIT_ID}" >> $GITHUB_ENV
|
||||
- name: Set Remote with GITHUB_TOKEN
|
||||
run: |
|
||||
git config --unset http.https://github.com/.extraheader
|
||||
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
|
||||
|
||||
- name: Build
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
SERVICES_INPUT: ${{ github.event.inputs.services }}
|
||||
run: |
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
readonly WORKING_DIR=$(pwd)
|
||||
readonly BUILD_SCRIPT_NAME="build.sh"
|
||||
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
|
||||
|
||||
# Initialize git configuration
|
||||
setup_git() {
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
}
|
||||
|
||||
# Get and increment image version
|
||||
image_version() {
|
||||
local service=$1
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
local current_version new_version
|
||||
|
||||
current_version=$(yq eval '.AppVersion' "$chart_path")
|
||||
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
|
||||
echo "$new_version"
|
||||
}
|
||||
|
||||
# Clone MSAAS repository if not exists
|
||||
clone_msaas() {
|
||||
if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
|
||||
git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
cd openreplay && git fetch origin && git checkout main
|
||||
git log -1
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
fi
|
||||
}
|
||||
|
||||
# Build managed services
|
||||
build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
|
||||
echo "Building managed service: $service"
|
||||
clone_msaas
|
||||
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/api"
|
||||
else
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/$service"
|
||||
fi
|
||||
|
||||
local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
|
||||
|
||||
echo "Executing: $build_cmd"
|
||||
if ! eval "$build_cmd" 2>&1; then
|
||||
echo "Build failed for $service"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Build service with given arguments
|
||||
build_service() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local build_args=$3
|
||||
local build_script=${4:-$BUILD_SCRIPT_NAME}
|
||||
|
||||
local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
|
||||
echo "Executing: $command"
|
||||
eval "$command"
|
||||
}
|
||||
|
||||
# Update chart version and commit changes
|
||||
update_chart_version() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
|
||||
# Ensure we're in the original working directory/repository
|
||||
cd "$WORKING_DIR"
|
||||
yq eval ".AppVersion = \"$version\"" -i "$chart_path"
|
||||
git add "$chart_path"
|
||||
git commit -m "Increment $service chart version to $version"
|
||||
git push --set-upstream origin "$BRANCH_NAME"
|
||||
cd -
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
setup_git
|
||||
|
||||
# Get backend services list
|
||||
ls backend/cmd >"$BACKEND_SERVICES_FILE"
|
||||
|
||||
# Parse services input (fix for GitHub Actions syntax)
|
||||
echo "Services: ${SERVICES_INPUT:-$1}"
|
||||
IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
|
||||
|
||||
# Process each service
|
||||
for service in "${services[@]}"; do
|
||||
echo "Processing service: $service"
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
|
||||
|
||||
# Determine build configuration based on service type
|
||||
if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
|
||||
# Backend service
|
||||
cd backend
|
||||
foss_build_args="nil $service"
|
||||
ee_build_args="ee $service"
|
||||
else
|
||||
# Non-backend service
|
||||
case "$service" in
|
||||
chalice | alerts | crons)
|
||||
cd "$WORKING_DIR/api"
|
||||
;;
|
||||
*)
|
||||
cd "$service"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Special build scripts for alerts/crons
|
||||
if [[ $service == 'alerts' || $service == 'crons' ]]; then
|
||||
build_script="build_${service}.sh"
|
||||
fi
|
||||
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
|
||||
# Get version and build
|
||||
local version
|
||||
version=$(image_version "$service")
|
||||
|
||||
# Build FOSS and EE versions
|
||||
build_service "$service" "$version" "$foss_build_args"
|
||||
build_service "$service" "${version}-ee" "$ee_build_args"
|
||||
|
||||
# Build managed version for specific services
|
||||
if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
|
||||
echo "Nothing to build in managed for service $service"
|
||||
else
|
||||
build_managed "$service" "$version"
|
||||
fi
|
||||
|
||||
# Update chart and commit
|
||||
update_chart_version "$service" "$version"
|
||||
done
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
# Cleanup
|
||||
rm -f "$BACKEND_SERVICES_FILE"
|
||||
}
|
||||
|
||||
echo "Working directory: $WORKING_DIR"
|
||||
# Run main function with all arguments
|
||||
main "$SERVICES_INPUT"
|
||||
|
||||
|
||||
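For reference, here is a minimal sketch of how the script above could be exercised outside the workflow, assuming the environment variables it reads (WORKING_DIR, SERVICES_INPUT, DOCKER_REPO_OSS, DOCKER_REPO_ARM, BUILD_SCRIPT_NAME, BACKEND_SERVICES_FILE, BRANCH_NAME, MSAAS_REPO_FOLDER) are exported by earlier steps; the values and the file name patch-build.sh are placeholders, not the workflow's real configuration:

# Hypothetical local dry-run of the patch-build script (all values are placeholders).
export WORKING_DIR="$(pwd)"                    # repo checkout root
export BRANCH_NAME="patch-build-example"       # branch the chart bumps are pushed to
export SERVICES_INPUT="frontend,chalice"       # comma-separated list parsed by main()
export DOCKER_REPO_OSS="example.registry/oss"  # amd64 images built by build_service()
export DOCKER_REPO_ARM="example.registry/arm"  # arm64 images built by build_managed()
export BUILD_SCRIPT_NAME="build.sh"
export BACKEND_SERVICES_FILE="/tmp/backend_services.txt"
export MSAAS_REPO_FOLDER="/tmp/msaas"
bash patch-build.sh "$SERVICES_INPUT"          # script name assumed for this sketch
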
      - name: Create Pull Request
        uses: repo-sync/pull-request@v2
        with:
          github_token: ${{ secrets.ACTIONS_COMMMIT_TOKEN }}
          source_branch: ${{ env.BRANCH_NAME }}
          destination_branch: "main"
          pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
          pr_body: |
            This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
            Once this PR is merged, the tag update job will run automatically.

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true
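As a side note on the update_chart_version helper above, the yq invocation rewrites a single key of the service's Chart.yaml in place before the commit; a small illustration with made-up values (the real charts live under scripts/helmcharts/openreplay/charts/):

# Before: scripts/helmcharts/openreplay/charts/frontend/Chart.yaml (illustrative value)
#   AppVersion: "v1.16.0"
version="v1.16.1"
yq eval ".AppVersion = \"$version\"" -i scripts/helmcharts/openreplay/charts/frontend/Chart.yaml
# After:
#   AppVersion: "v1.16.1"
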
69 .github/workflows/peers-ee.yaml vendored Normal file
@@ -0,0 +1,69 @@
# This action will push the peers changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
paths:
|
||||
- "ee/peers/**"
|
||||
- "peers/**"
|
||||
- "!peers/.gitignore"
|
||||
- "!peers/*-dev.sh"
|
||||
|
||||
name: Build and Deploy Peers
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Building and Pushing api image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
cd peers
|
||||
PUSH_IMAGE=1 bash build.sh ee
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
bash kube-install.sh --app peers
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
#
|
||||
68 .github/workflows/peers.yaml vendored Normal file
@@ -0,0 +1,68 @@
# This action will push the peers changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
paths:
|
||||
- "peers/**"
|
||||
- "!peers/.gitignore"
|
||||
- "!peers/*-dev.sh"
|
||||
|
||||
name: Build and Deploy Peers
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Building and Pushing api image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
cd peers
|
||||
PUSH_IMAGE=1 bash build.sh
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
bash kube-install.sh --app peers
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
#
|
||||
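The two peers workflows above patch scripts/helmcharts/vars.yaml with sed before running kube-install.sh. A minimal illustration of what one of those substitutions does, using the FOSS domain from the workflow rather than a real secret:

# vars.yaml before (illustrative line):
#   domain_name: ""
sed -i 's#domain_name.*#domain_name: "foss.openreplay.com" #g' vars.yaml
# vars.yaml after:
#   domain_name: "foss.openreplay.com"
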
86 .github/workflows/pr-env-delete.yaml vendored
@@ -1,86 +0,0 @@
name: PR-Env-Delete

on:
  workflow_dispatch:
    inputs:
      env_origin_url:
        description: |
          URL of the origin of the PR env to be deleted. Example: https://pr-1717-ee.openreplay.tools
        required: true

jobs:
  create-vcluster-pr:
    runs-on: ubuntu-latest
    env:
      build_service: ${{ github.event.inputs.build_service }}
      env_flavour: ${{ github.event.inputs.env_flavour }}
    steps:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.OR_PR_AWS_DEFAULT_REGION }}
      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.PR_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext
      - name: Install vCluster CLI
        run: |
          # Install the vCluster CLI
          curl -s -L "https://github.com/loft-sh/vcluster/releases/download/v0.16.4/vcluster-linux-amd64" -o /usr/local/bin/vcluster
          chmod +x /usr/local/bin/vcluster
      - name: Deleting vcluster
        run: |
          url=${{ github.event.inputs.env_origin_url }}
          # Remove the protocol part of the URL
          url_no_protocol=${url#*//}

          # Extract the subdomain and domain
          subdomain=$(echo $url_no_protocol | cut -d"." -f1)
          domain=$(echo $url_no_protocol | cut -d"." -f2-)
          echo "subdomain=$subdomain" >> $GITHUB_ENV
          echo "domain=$domain" >> $GITHUB_ENV
          vcluster delete -n $subdomain-vcluster $subdomain-vcluster
          echo $subdomain $domain
      - name: Get LoadBalancer IP
        id: lb-ip
        run: |
          LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
          echo "::set-output name=ip::$LB_IP"
      - name: Delete dns record
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: ${{ secrets.OR_PR_AWS_DEFAULT_REGION }}
        run: |
          DOMAIN_NAME_1=$subdomain.$domain
          DOMAIN_NAME_2=$subdomain-vcluster.$domain

          cat <<EOF > route53-changes.json
          {
            "Comment": "Delete record set for VCluster",
            "Changes": [
              {
                "Action": "DELETE",
                "ResourceRecordSet": {
                  "Name": "$DOMAIN_NAME_1",
                  "Type": "CNAME",
                  "TTL": 300,
                  "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
                }
              },
              {
                "Action": "DELETE",
                "ResourceRecordSet": {
                  "Name": "$DOMAIN_NAME_2",
                  "Type": "CNAME",
                  "TTL": 300,
                  "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
                }
              }
            ]
          }
          EOF
          aws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json
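To make the parsing in the "Deleting vcluster" step above concrete, here is the same parameter expansion and cut logic applied to the example URL from the input description; only the echo at the end is added for illustration:

url="https://pr-1717-ee.openreplay.tools"            # example from the input description
url_no_protocol=${url#*//}                           # -> pr-1717-ee.openreplay.tools
subdomain=$(echo $url_no_protocol | cut -d"." -f1)   # -> pr-1717-ee
domain=$(echo $url_no_protocol | cut -d"." -f2-)     # -> openreplay.tools
echo "$subdomain $domain"
# The vcluster and its namespace are then both named "<subdomain>-vcluster".
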
340 .github/workflows/pr-env.yaml vendored
@@ -1,340 +0,0 @@
name: PR-Deployment
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
build_service:
|
||||
description: |
|
||||
Name of a single service to build (in lowercase), e.g. api or frontend. Use backend:service-name to build a backend service.
If a given image is not built, it'll be deployed from the latest release.
Options: none/all/service-name/backend:{app1/app1,app2,app3/all}
|
||||
required: false
|
||||
default: none
|
||||
env_flavour:
|
||||
description: 'Which env to build. Values: foss/ee'
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
create-vcluster-pr:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
build_service: ${{ github.event.inputs.build_service }}
|
||||
env_flavour: ${{ github.event.inputs.env_flavour }}
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ secrets.OR_PR_AWS_DEFAULT_REGION}}
|
||||
- name: Setting up env variables
|
||||
run: |
|
||||
# Fetching details open/draft PR for current branch
|
||||
PR_DATA=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
|
||||
"https://api.github.com/repos/${{ github.repository }}/pulls" \
|
||||
| jq -r --arg BRANCH "${{ github.ref_name }}" '.[] | select((.head.ref==$BRANCH) and (.state=="open") and (.draft==true or .draft==false))')
|
||||
# Extracting PR number
|
||||
PR_NUMBER=$(echo "$PR_DATA" | jq -r '.number' | head -n 1)
|
||||
if [ -z $PR_NUMBER ]; then
|
||||
echo "No PR found for ${{ github.ref_name}}"
|
||||
exit 100
|
||||
fi
|
||||
|
||||
echo "PR_NUMBER_PRE=$PR_NUMBER" >> $GITHUB_ENV
|
||||
PR_NUMBER=pr-$PR_NUMBER
|
||||
if [ $env_flavour == "ee" ]; then
|
||||
PR_NUMBER=$PR_NUMBER-ee
|
||||
fi
|
||||
echo "PR number: $PR_NUMBER"
|
||||
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV
|
||||
|
||||
# Extracting PR status (open, closed, merged)
|
||||
PR_STATUS=$(echo "$PR_DATA" | jq -r '.state' | head -n 1)
|
||||
echo "PR status: $PR_STATUS"
|
||||
echo "PR_STATUS=$PR_STATUS" >> $GITHUB_ENV
|
||||
- name: Install vCluster CLI
|
||||
run: |
|
||||
# Replace with the command to install vCluster CLI
|
||||
curl -s -L "https://github.com/loft-sh/vcluster/releases/download/v0.16.4/vcluster-linux-amd64" -o /usr/local/bin/vcluster
|
||||
chmod +x /usr/local/bin/vcluster
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.PR_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Check existing vcluster
|
||||
id: vcluster_exists
|
||||
continue-on-error: true
|
||||
run: |
|
||||
if ! $(vcluster list | grep $PR_NUMBER &> /dev/null); then
|
||||
echo "no cluster found for $PR_NUMBER"
|
||||
echo "::set-output name=failed::true"
|
||||
exit 100
|
||||
fi
|
||||
DOMAIN_NAME=${PR_NUMBER}-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
vcluster connect ${PR_NUMBER}-vcluster --update-current=false --server=https://$DOMAIN_NAME
|
||||
mv kubeconfig.yaml /tmp/kubeconfig.yaml
|
||||
|
||||
- name: Get LoadBalancer IP
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
id: lb-ip
|
||||
run: |
|
||||
# LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
|
||||
echo "::set-output name=ip::$LB_IP"
|
||||
|
||||
- name: Create vCluster
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
run: |
|
||||
# Replace with the actual command to create a vCluster
|
||||
pwd
|
||||
cd scripts/pr-env/
|
||||
bash create.sh ${PR_NUMBER}.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
cp kubeconfig.yaml /tmp/
|
||||
|
||||
- name: Update AWS Route53 Record
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: ${{ secrets.OR_PR_AWS_DEFAULT_REGION }}
|
||||
run: |
|
||||
DOMAIN_NAME_1=$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
DOMAIN_NAME_2=$PR_NUMBER.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
|
||||
cat <<EOF > route53-changes.json
|
||||
{
|
||||
"Comment": "Create record set for VCluster",
|
||||
"Changes": [
|
||||
{
|
||||
"Action": "CREATE",
|
||||
"ResourceRecordSet": {
|
||||
"Name": "$DOMAIN_NAME_1",
|
||||
"Type": "CNAME",
|
||||
"TTL": 300,
|
||||
"ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
|
||||
}
|
||||
},
|
||||
{
|
||||
"Action": "CREATE",
|
||||
"ResourceRecordSet": {
|
||||
"Name": "$DOMAIN_NAME_2",
|
||||
"Type": "CNAME",
|
||||
"TTL": 300,
|
||||
"ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
#
|
||||
NEW_IP=${{ steps.lb-ip.outputs.ip }}
|
||||
|
||||
# Get the current IP address associated with the domain
|
||||
CURRENT_IP=$(dig +short $DOMAIN_NAME_1 @1.1.1.1)
|
||||
echo "current ip: $CURRENT_IP"
|
||||
# Check if the domain has no IP association or if the IPs are different
|
||||
if [ -z "$CURRENT_IP" ] || [ "$CURRENT_IP" != "$NEW_IP" ]; then
|
||||
aws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json
|
||||
fi
|
||||
|
||||
|
||||
- name: Wait for DNS Propagation
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
env:
|
||||
EXPECTED_IP: ${{ steps.lb-ip.outputs.ip }}
|
||||
run: |
|
||||
DOMAIN_NAME="$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}"
|
||||
MAX_ATTEMPTS=30
|
||||
attempt=1
|
||||
until [[ $attempt -gt $MAX_ATTEMPTS ]]
|
||||
do
|
||||
# Use dig to query DNS records
|
||||
DNS_RESULT=$(dig +short $DOMAIN_NAME @1.1.1.1)
|
||||
|
||||
# Check if DNS result is empty
|
||||
if [ -z "$DNS_RESULT" ]; then
|
||||
echo "No IP or CNAME records found for $DOMAIN_NAME."
|
||||
else
|
||||
echo "DNS records found for $DOMAIN_NAME:"
|
||||
echo "$DNS_RESULT"
|
||||
break
|
||||
fi
|
||||
echo "Waiting for DNS propagation... Attempt $attempt of $MAX_ATTEMPTS"
|
||||
((attempt++))
|
||||
sleep 20
|
||||
done
|
||||
|
||||
if [[ $attempt -gt $MAX_ATTEMPTS ]]; then
|
||||
echo "DNS propagation check failed for $DOMAIN_NAME after $MAX_ATTEMPTS attempts."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Install openreplay
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
env:
|
||||
KUBECONFIG: /tmp/kubeconfig.yaml
|
||||
run: |
|
||||
DOMAIN_NAME=$PR_NUMBER.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
cd scripts/helmcharts
|
||||
sed -i "s/domainName: \"\"/domainName: \"${DOMAIN_NAME}\"/g" vars.yaml
|
||||
# If ee cluster, enable the following
|
||||
if [ $env_flavour == "ee" ]; then
|
||||
# Explanation for the sed command:
|
||||
# /clickhouse:/: Matches lines containing "clickhouse:".
|
||||
# {:a: Starts a block with label 'a'.
|
||||
# n;: Reads the next line.
|
||||
# /enabled:/s/false/true/: If the line contains 'enabled:', replace 'false' with 'true'.
|
||||
# t done;: If the substitution was made, branch to label 'done'.
|
||||
# ba;: Go back to label 'a' if no substitution was made.
|
||||
# :done}: Label 'done', where the script goes after a successful substitution.
|
||||
sed -i '/clickhouse:/{:a;n;/enabled:/s/false/true/;t done; ba; :done}' vars.yaml
|
||||
sed -i '/kafka:/{:a;n;/# enabled:/s/# enabled: .*/enabled: true/;t done; ba; :done}' vars.yaml
|
||||
sed -i '/redis:/{:a;n;/enabled:/s/true/false/;t done; ba; :done}' vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${DOMAIN_NAME}\"/g" vars.yaml
|
||||
fi
|
||||
helm upgrade -i databases -n db ./databases -f vars.yaml --create-namespace --wait -f ../pr-env/resources.yaml
|
||||
helm upgrade -i openreplay -n app ./openreplay -f vars.yaml --create-namespace --set ingress-nginx.enabled=false -f ../pr-env/resources.yaml --wait
|
||||
|
||||
- name: Build and deploy application
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
env: ${{ github.event.inputs.env_flavour }}
|
||||
run: |
|
||||
set -x
|
||||
|
||||
app_name=${{github.event.inputs.build_service}}
|
||||
echo "building and deploying $app_name"
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
export KUBECONFIG=/tmp/kubeconfig.yaml
|
||||
|
||||
function build_and_deploy {
|
||||
apps_to_build=$1
|
||||
case $apps_to_build in
|
||||
backend*)
|
||||
echo "Building backend build"
|
||||
cd $GITHUB_WORKSPACE/backend
|
||||
components=()
|
||||
if [ $apps_to_build == "backend:all" ]; then
|
||||
# Append all folder names from 'cmd/' directory to the array
|
||||
for folder in cmd/*/; do
|
||||
# Use basename to extract the folder name without path
|
||||
folder_name=$(basename "$folder")
|
||||
components+=("$folder_name")
|
||||
done
|
||||
else
|
||||
# "${apps_to_build#*:}" :: Strip backend: and output app1,app2,app3 to read -ra
|
||||
IFS=',' read -ra components <<< "${apps_to_build#*:}"
|
||||
fi
|
||||
echo "Building components: " ${components[@]}
|
||||
for component in "${components[@]}"; do
|
||||
if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null 2>&1; then
|
||||
echo Image present upstream. Skipping build: $component
|
||||
else
|
||||
echo "Building backend:$component"
|
||||
PUSH_IMAGE=1 bash -x ./build.sh $env $component
|
||||
fi
|
||||
kubectl set image -n app deployment/$component-openreplay $component=${DOCKER_REPO}/$component:${IMAGE_TAG}
|
||||
done
|
||||
;;
|
||||
chalice|api)
|
||||
echo "Chalice build"
|
||||
component=chalice
|
||||
cd $GITHUB_WORKSPACE/api || { echo "Nothing to build: $component"; exit 100; }
|
||||
if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null 2>&1; then
|
||||
echo Image present upstream. Skipping build: $component
|
||||
else
|
||||
echo "Building backend:$component"
|
||||
PUSH_IMAGE=1 bash -x ./build.sh $env $component
|
||||
fi
|
||||
kubectl set image -n app deployment/$component-openreplay $component=${DOCKER_REPO}/$component:${IMAGE_TAG}
|
||||
;;
|
||||
*)
|
||||
echo "$apps_to_build build"
|
||||
cd $GITHUB_WORKSPACE/$apps_to_build || { echo "Nothing to build: $apps_to_build"; exit 100; }
|
||||
component=$apps_to_build
|
||||
if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null 2>&1; then
|
||||
echo Image present upstream. Skipping build: $component
|
||||
else
|
||||
echo "Building backend:$component"
|
||||
PUSH_IMAGE=1 bash -x ./build.sh $env $component
|
||||
fi
|
||||
kubectl set image -n app deployment/$apps_to_build-openreplay $apps_to_build=${DOCKER_REPO}/$apps_to_build:${IMAGE_TAG}
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
case $app_name in
|
||||
all)
|
||||
build_and_deploy "backend:all"
|
||||
build_and_deploy "frontend"
|
||||
build_and_deploy "chalice"
|
||||
build_and_deploy "sourcemapreader"
|
||||
build_and_deploy "assist-stats"
|
||||
;;
|
||||
none)
|
||||
echo "Nothing to build"
|
||||
;;
|
||||
*)
|
||||
build_and_deploy $app_name
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Send results to Slack
|
||||
if: steps.vcluster_exists.outputs.failed == 'true'
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.OR_PR_SLACK_BOT_TOKEN }}
|
||||
SLACK_CHANNEL: ${{ secrets.OR_PR_SLACK_CHANNEL }}
|
||||
run: |
|
||||
echo hi ${{ steps.vcluster_exists.outputs.failed }}
|
||||
DOMAIN_NAME=https://$PR_NUMBER.${{ secrets.OR_PR_DOMAIN_NAME }}
|
||||
|
||||
# Variables
|
||||
PR_NUMBER=https://github.com/${{ github.repository }}/pull/${PR_NUMBER_PRE}
|
||||
BRANCH_NAME=${{ github.ref_name }}
|
||||
ORIGIN=$DOMAIN_NAME
|
||||
ASSETS_HOST=$DOMAIN_NAME/assets
|
||||
API_EDP=$DOMAIN_NAME/api
|
||||
INGEST_POINT=$DOMAIN_NAME/ingest
|
||||
|
||||
# File to be uploaded
|
||||
FILE_PATH="/tmp/kubeconfig.yaml"
|
||||
if [ ! -f "$FILE_PATH" ]; then
|
||||
echo "Kubeconfig file not found: $FILE_PATH"
|
||||
exit 100
|
||||
fi
|
||||
|
||||
# Form the message payload
|
||||
PAYLOAD=$(cat <<EOF
|
||||
{
|
||||
"channel": "$SLACK_CHANNEL",
|
||||
"text": "Deployment Information:\n- PR#: $PR_NUMBER\n- PR Status: $PR_STATUS\n- Branch Name: $BRANCH_NAME\n- Origin: $ORIGIN\n- Assets Host: $ASSETS_HOST\n- API Endpoint: $API_EDP\n- Ingest Point: $INGEST_POINT\n- To use the cluster: download the following file and run the following commands, \n export KUBECONFIG=/path/to/kubeconfig.yaml\n k9s"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
# Send the message to Slack
|
||||
curl -X POST -H "Authorization: Bearer $SLACK_BOT_TOKEN" -H 'Content-type: application/json' --data "$PAYLOAD" https://slack.com/api/chat.postMessage > /dev/null
|
||||
|
||||
# Upload the file to Slack
|
||||
curl -F file=@"$FILE_PATH" -F channels="$SLACK_CHANNEL" -F token="$SLACK_BOT_TOKEN" https://slack.com/api/files.upload > /dev/null
|
||||
|
||||
# - name: Cleanup
|
||||
# if: always()
|
||||
# run: |
|
||||
# # Add any cleanup commands if necessary
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
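The build_and_deploy function in the PR-Deployment workflow above skips a build when the image tag already exists upstream. A standalone sketch of that check, assuming docker is already logged in to the registry; the image name is a placeholder:

# Returns 0 (exists) or non-zero (missing) without printing the manifest.
image_exists() {
  docker manifest inspect "$1" > /dev/null 2>&1
}

if image_exists "${DOCKER_REPO}/frontend:${IMAGE_TAG}"; then   # placeholder image reference
  echo "Image present upstream. Skipping build: frontend"
else
  PUSH_IMAGE=1 bash ./build.sh "$env" frontend
fi
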
103 .github/workflows/release-deployment.yaml vendored
@@ -1,103 +0,0 @@
name: Release Deployment
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
|
||||
required: true
|
||||
branch:
|
||||
description: 'Branch to deploy (defaults to dev)'
|
||||
required: false
|
||||
default: 'dev'
|
||||
|
||||
env:
|
||||
IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ github.event.inputs.branch }}
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Set image tag with branch info
|
||||
run: |
|
||||
SHORT_SHA=$(git rev-parse --short HEAD)
|
||||
echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
|
||||
echo "Using image tag: $IMAGE_TAG"
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
|
||||
- name: Build and push Docker images
|
||||
run: |
|
||||
# Parse the comma-separated services list into an array
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
working_dir=$(pwd)
|
||||
|
||||
# Define backend services (consider moving this to workflow inputs or repo config)
|
||||
ls backend/cmd >> /tmp/backend.txt
|
||||
BUILD_SCRIPT_NAME="build.sh"
|
||||
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
# Check if service is backend
|
||||
if grep -q $SERVICE /tmp/backend.txt; then
|
||||
cd $working_dir/backend
|
||||
foss_build_args="nil $SERVICE"
|
||||
ee_build_args="ee $SERVICE"
|
||||
else
|
||||
cd $working_dir
|
||||
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
|
||||
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
{
|
||||
echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
}&
|
||||
{
|
||||
echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
}&
|
||||
done
|
||||
wait
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
name: Using ee release cluster
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}
|
||||
|
||||
- name: Deploy to ee release Kubernetes
|
||||
run: |
|
||||
echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
|
||||
echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
|
||||
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
|
||||
done
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
name: Using foss release cluster
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}
|
||||
|
||||
- name: Deploy to FOSS release Kubernetes
|
||||
run: |
|
||||
echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
|
||||
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
|
||||
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
|
||||
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
|
||||
done
|
||||
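The Release Deployment workflow above launches the FOSS and EE builds of each service as background jobs and then waits for all of them. A reduced sketch of that pattern, with the real IMAGE_TAG=... bash build.sh invocations stubbed out:

build() {   # stand-in for the real build.sh invocations above
  echo "building $1 with tag $2"
  sleep 1
}

for SERVICE in frontend chalice; do        # placeholder service list
  { build "$SERVICE" "$IMAGE_TAG"; } &
  { build "$SERVICE" "${IMAGE_TAG}-ee"; } &
done
wait   # block until every background build has finished

Worth noting: a plain wait with no operands exits 0 even if a background build failed, so the workflow relies on the build commands' own output to surface errors.
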
150 .github/workflows/sourcemaps-reader-ee.yaml vendored
@@ -1,150 +0,0 @@
# This action will push the sourcemapreader changes to ee
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
required: false
|
||||
default: "false"
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
paths:
|
||||
- "ee/sourcemap-reader/**"
|
||||
- "sourcemap-reader/**"
|
||||
- "!sourcemap-reader/.gitignore"
|
||||
- "!sourcemap-reader/*-dev.sh"
|
||||
|
||||
name: Build and Deploy sourcemap-reader EE
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing sourcemaps-reader image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd sourcemap-reader
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("sourcemaps-reader")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("sourcemaps-reader")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/sourcemaps-reader/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
sed -i "s/sourcemaps-reader/sourcemapreader/g" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
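Several workflows in this diff, including the sourcemaps-reader ones above, rebuild /tmp/image_override.yaml from the image tags currently running in the cluster so that redeploying one chart does not bump the others. A compact sketch of that generation loop, fed with a fabricated sample line instead of real kubectl output:

# Suppose the running pods report an image like .../foss/chalice:v1.16.0
echo "chalice:v1.16.0" > /tmp/image_tag.txt        # fabricated sample of the grep/cut output
echo > /tmp/image_override.yaml
for line in $(cat /tmp/image_tag.txt); do
  image_array=($(echo "$line" | tr ':' '\n'))       # -> ("chalice" "v1.16.0")
  cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    tag: ${image_array[1]}
EOF
done
cat /tmp/image_override.yaml   # chalice: { image: { tag: v1.16.0 } }
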
186 .github/workflows/sourcemaps-reader.yaml vendored
@@ -1,11 +1,6 @@
# This action will push the sourcemapreader changes to aws
|
||||
# This action will push the chalice changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
required: false
|
||||
default: "false"
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
@ -22,128 +17,89 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing sourcemaps-reader image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd sourcemap-reader
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("sourcemaps-reader")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("sourcemaps-reader")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
- name: Building and Pushing sourcemaps-reader image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
cd sourcemap-reader
|
||||
PUSH_IMAGE=1 bash build.sh
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/sourcemaps-reader/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
sed -i "s/sourcemaps-reader/sourcemapreader/g" /tmp/image_override.yaml
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
# Update changed image tag
|
||||
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: foss
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
#
|
||||
|
|
|
|||
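Both sourcemap-reader workflows above (and the workers workflow later in this diff) gate the docker push on a Trivy image scan unless the skip_security_checks input is set. A trimmed sketch of that gate; the image reference is a placeholder and the error handling is simplified:

skip_security_checks="false"                        # workflow_dispatch input
image="$DOCKER_REPO/sourcemaps-reader:$IMAGE_TAG"   # placeholder reference
if [[ "x$skip_security_checks" != "xtrue" ]]; then
  curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
  ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library \
    --severity "HIGH,CRITICAL" --ignore-unfixed "$image" || exit 1
else
  echo "Skipping Security Checks"
fi
docker push "$image"
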
67 .github/workflows/tracker-tests.yaml vendored
@@ -1,67 +0,0 @@
# Checking unit tests for tracker and assist
|
||||
name: Tracker tests
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches: [ "main", "dev" ]
|
||||
paths:
|
||||
- tracker/**
|
||||
pull_request:
|
||||
branches: [ "dev", "main" ]
|
||||
paths:
|
||||
- tracker/**
|
||||
jobs:
|
||||
build-and-test:
|
||||
runs-on: macos-latest
|
||||
name: Build and test Tracker
|
||||
steps:
|
||||
- uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
- uses: actions/checkout@v3
|
||||
- name: Cache tracker modules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: tracker/tracker/node_modules
|
||||
key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
|
||||
restore-keys: |
|
||||
test_tracker_build{{ runner.OS }}-build-
|
||||
test_tracker_build{{ runner.OS }}-
|
||||
- name: Cache tracker-assist modules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: tracker/tracker-assist/node_modules
|
||||
key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
|
||||
restore-keys: |
|
||||
test_tracker_build{{ runner.OS }}-build-
|
||||
test_tracker_build{{ runner.OS }}-
|
||||
- name: Setup Testing packages
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
bun install
|
||||
- name: Jest tests
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
bun run test:ci
|
||||
- name: Building test
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
bun run build
|
||||
- name: (TA) Setup Testing packages
|
||||
run: |
|
||||
cd tracker/tracker-assist
|
||||
bun install
|
||||
- name: (TA) Jest tests
|
||||
run: |
|
||||
cd tracker/tracker-assist
|
||||
bun run test:ci
|
||||
- name: (TA) Building test
|
||||
run: |
|
||||
cd tracker/tracker-assist
|
||||
bun run build
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: tracker
|
||||
name: tracker
|
||||
76 .github/workflows/ui-tests.js.yml vendored
@@ -27,54 +27,54 @@ jobs:
name: Build and test Tracker plus Replayer
|
||||
strategy:
|
||||
matrix:
|
||||
node-version: [ 20.x ]
|
||||
node-version: [ 16.x ]
|
||||
steps:
|
||||
- uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
- uses: actions/checkout@v3
|
||||
- name: Use Node.js ${{ matrix.node-version }}
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
- name: Cache tracker modules
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: tracker/tracker/node_modules
|
||||
key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('tracker/tracker/bun.lockb') }}
|
||||
key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
test_tracker_build-{{ runner.OS }}-build-
|
||||
test_tracker_build-{{ runner.OS }}-
|
||||
test_tracker_build{{ runner.OS }}-build-
|
||||
test_tracker_build{{ runner.OS }}-
|
||||
- name: Setup Testing packages
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
bun install
|
||||
npm i -g yarn
|
||||
yarn
|
||||
- name: Jest tests
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
yarn test
|
||||
- name: Build tracker inst
|
||||
run: |
|
||||
cd tracker/tracker
|
||||
bun run build
|
||||
yarn build
|
||||
- name: Setup Testing UI Env
|
||||
run: |
|
||||
cd tracker/tracker-testing-playground
|
||||
echo "REACT_APP_KEY=$FOSS_PROJECT_KEY" >> .env
|
||||
echo "REACT_APP_INGEST=$FOSS_INGEST" >> .env
|
||||
- name: Cache testing UI node modules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: tracker/tracker-testing-playground/node_modules
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
- name: Setup Testing packages
|
||||
run: |
|
||||
cd tracker/tracker-testing-playground
|
||||
yarn
|
||||
- name: Wait for Testing Frontend
|
||||
run: |
|
||||
cd tracker/tracker-testing-playground
|
||||
yarn start &> ui.log &
|
||||
npx wait-on http://localhost:3000
|
||||
cd ../../frontend
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: frontend/node_modules
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('frontend/yarn.lock') }}
|
||||
key: ${{ runner.OS }}-build-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.OS }}-build-
|
||||
${{ runner.OS }}-
|
||||
|
|
@ -95,10 +95,11 @@ jobs:
|
|||
echo "MINIO_USE_SSL = ''" >> .env
|
||||
echo "MINIO_ACCESS_KEY = ''" >> .env
|
||||
echo "MINIO_SECRET_KEY = ''" >> .env
|
||||
echo "VERSION = '1.15.0'" >> .env
|
||||
echo "TRACKER_VERSION = '10.0.0'" >> .env
|
||||
echo "VERSION = '1.9.0'" >> .env
|
||||
echo "TRACKER_VERSION = '4.0.0'" >> .env
|
||||
echo "COMMIT_HASH = 'dev'" >> .env
|
||||
echo "{ \"account\": \"$CY_ACC\", \"password\": \"$CY_PASS\" }" >> cypress.env.json
|
||||
|
||||
- name: Setup packages
|
||||
run: |
|
||||
cd frontend
|
||||
|
|
@ -106,35 +107,15 @@ jobs:
|
|||
- name: Run unit tests
|
||||
run: |
|
||||
cd frontend
|
||||
yarn test:ci
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: ui
|
||||
name: ui
|
||||
- name: Run testing frontend
|
||||
run: |
|
||||
cd tracker/tracker-testing-playground
|
||||
yarn start &> testing.log &
|
||||
echo "Started"
|
||||
npm i -g wait-on
|
||||
echo "Got wait on"
|
||||
sleep 30
|
||||
cat testing.log
|
||||
npx wait-on http://localhost:3000
|
||||
echo "Done"
|
||||
timeout-minutes: 4
|
||||
yarn test
|
||||
- name: Run Frontend
|
||||
run: |
|
||||
cd frontend
|
||||
bun start &> frontend.log &
|
||||
echo "Started"
|
||||
sleep 30
|
||||
cat frontend.log
|
||||
yarn start &> frontend.log &
|
||||
- name: Wait for frontend
|
||||
run: |
|
||||
cd frontend
|
||||
npx wait-on http://0.0.0.0:3333
|
||||
echo "Done"
|
||||
timeout-minutes: 4
|
||||
- name: (Chrome) Run visual tests
|
||||
run: |
|
||||
cd frontend
|
||||
|
|
@ -144,7 +125,6 @@ jobs:
|
|||
# run: yarn cy:test-firefox
|
||||
# - name: (Edge) Run visual tests
|
||||
# run: yarn cy:test-edge
|
||||
timeout-minutes: 5
|
||||
- name: Upload Debug
|
||||
if: ${{ failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
|
|
|
|||
42 .github/workflows/update-tag.yaml vendored
@@ -1,42 +0,0 @@
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
branches:
|
||||
- main
|
||||
name: Release tag update --force
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Build Patch from main
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Get latest release tag using GitHub API
|
||||
id: get-latest-tag
|
||||
run: |
|
||||
LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
|
||||
"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
|
||||
| jq -r .tag_name)
|
||||
|
||||
# Fallback to git command if API doesn't return a tag
|
||||
if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
|
||||
echo "Not found latest tag"
|
||||
exit 100
|
||||
fi
|
||||
|
||||
echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
|
||||
echo "Latest tag: $LATEST_TAG"
|
||||
|
||||
- name: Set Remote with GITHUB_TOKEN
|
||||
run: |
|
||||
git config --unset http.https://github.com/.extraheader
|
||||
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
|
||||
|
||||
- name: Push main branch to tag
|
||||
run: |
|
||||
git checkout main
|
||||
echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
|
||||
git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
|
||||
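The final push in the update-tag workflow above moves an existing release tag to the tip of main. Locally, the same effect could be sketched as follows, assuming LATEST_TAG is already resolved:

git checkout main
git tag -f "$LATEST_TAG"                          # re-point the tag at the current commit
git push origin "refs/tags/$LATEST_TAG" --force
# Equivalent to the workflow's: git push origin HEAD:refs/tags/$LATEST_TAG --force
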
290 .github/workflows/workers-ee.yaml vendored
@@ -6,14 +6,14 @@ on:
build_service:
|
||||
description: 'Name of a single service to build(in small letters). "all" to build everything'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- dev
|
||||
paths:
|
||||
- ee/backend/**
|
||||
- backend/**
|
||||
|
|
@ -26,168 +26,154 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
# ref: staging
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
# ref: staging
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Downloading yq
|
||||
run: |
|
||||
VERSION="v4.42.1"
|
||||
sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
|
||||
sudo chmod +x /usr/bin/yq
|
||||
# # Caching docker images
|
||||
# - uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# # Ignore the failure of a step and avoid terminating the job.
|
||||
# continue-on-error: true
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# # Caching docker images
|
||||
# - uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# # Ignore the failure of a step and avoid terminating the job.
|
||||
# continue-on-error: true
|
||||
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
#
|
||||
# TODO: Check the container tags are same, then skip the build and deployment.
|
||||
#
|
||||
# Build a docker container and push it to Docker Registry so that it can be deployed to Kubernetes cluster.
|
||||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
set -x
|
||||
touch /tmp/images_to_build.txt
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
tmp_param=${{ github.event.inputs.build_service }}
|
||||
build_param=${tmp_param:-'false'}
|
||||
case ${build_param} in
|
||||
false)
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
#
|
||||
# TODO: Check the container tags are same, then skip the build and deployment.
|
||||
#
|
||||
# Build a docker container and push it to Docker Registry so that it can be deployed to Kubernetes cluster.
|
||||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
set -x
|
||||
touch /tmp/images_to_build.txt
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
tmp_param=${{ github.event.inputs.build_service }}
|
||||
build_param=${tmp_param:-'false'}
|
||||
case ${build_param} in
|
||||
false)
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
|
||||
;;
|
||||
all)
|
||||
ls backend/cmd > /tmp/images_to_build.txt
|
||||
;;
|
||||
all)
|
||||
ls backend/cmd > /tmp/images_to_build.txt
|
||||
;;
|
||||
*)
|
||||
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
|
||||
;;
|
||||
esac
|
||||
*)
|
||||
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
|
||||
echo "Nothing to build here"
|
||||
touch /tmp/nothing-to-build-here
|
||||
exit 0
|
||||
fi
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
cat /tmp/images_to_build.txt
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=0 bash -x ./build.sh ee $image
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
|
||||
echo "Nothing to build here"
|
||||
touch /tmp/nothing-to-build-here
|
||||
exit 0
|
||||
fi
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
cat /tmp/images_to_build.txt
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=0 bash -x ./build.sh ee $image
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
|
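The build step above derives the list of images from the Go packages touched in the last commit. A minimal sketch of that selection logic, runnable from the repository root (the backend/ layout is taken from the script above; nothing here builds or pushes anything):

#!/usr/bin/env bash
# Hypothetical dry run of the image-selection logic: map changed Go packages
# under backend/pkg or backend/internal to the services/cmd binaries that
# import them, de-duplicated, one image name per line.
git diff --name-only HEAD HEAD~1 \
  | grep -E "backend/pkg|backend/internal" \
  | grep -vE '^ee/' \
  | cut -d '/' -f3 \
  | uniq \
  | while read -r pkg_name; do
      grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
    done \
  | awk '!seen[$0]++'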
||||
- name: Deploying to kubernetes
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
set -x
|
||||
[[ -f /tmp/nothing-to-build-here ]] && exit 0
|
||||
cd scripts/helmcharts/
|
||||
- name: Deploying to kubernetes
|
||||
env:
|
||||
# We're not passing the -ee suffix, because helm will add it.
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
set -x
|
||||
[[ -f /tmp/nothing-to-build-here ]] && exit 0
|
||||
cd scripts/helmcharts/
|
||||
|
||||
set -x
|
||||
echo > /tmp/image_override.yaml
|
||||
mkdir /tmp/helmcharts
|
||||
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
|
||||
mv openreplay/charts/quickwit /tmp/helmcharts/
|
||||
mv openreplay/charts/connector /tmp/helmcharts/
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
mv openreplay/charts/$image /tmp/helmcharts/
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
${image}:
|
||||
image:
|
||||
# We have to strip off the -ee suffix, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
done
|
||||
ls /tmp/helmcharts
|
||||
rm -rf openreplay/charts/*
|
||||
ls openreplay/charts
|
||||
mv /tmp/helmcharts/* openreplay/charts/
|
||||
ls openreplay/charts
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
# Deploy command
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
|
||||
set -x
|
||||
echo > /tmp/image_override.yaml
|
||||
mkdir /tmp/helmcharts
|
||||
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
|
||||
mv openreplay/charts/quickwit /tmp/helmcharts/
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
mv openreplay/charts/$image /tmp/helmcharts/
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
${image}:
|
||||
image:
|
||||
# We have to strip off the -ee suffix, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
done
|
||||
ls /tmp/helmcharts
|
||||
rm -rf openreplay/charts/*
|
||||
ls openreplay/charts
|
||||
mv /tmp/helmcharts/* openreplay/charts/
|
||||
ls openreplay/charts
|
||||
|
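For reference, the loop above appends one stanza per image to /tmp/image_override.yaml, and the helm template command then merges it with vars.yaml. A hedged sketch of what that looks like for a single hypothetical service named http, rendering the chart client-side only:

# Assumed tag value; the workflow builds it from ref_name and sha.
IMAGE_TAG="main_0123abcd"
cat > /tmp/image_override.yaml <<EOF
http:
  image:
    tag: ${IMAGE_TAG}
EOF
cd scripts/helmcharts/
# Render only; no cluster changes.
helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true > /tmp/rendered.yaml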
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
# Deploy command
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: 'Build failed :bomb:'
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
#
|
||||
|
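Note that both run blocks above publish the image name with the legacy ::set-output workflow command, which GitHub has deprecated in favor of the GITHUB_OUTPUT environment file. A sketch of the equivalent line, keeping the same step id and output name:

# Replacement for: echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
echo "image=$DOCKER_REPO/$image:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
# Downstream steps would still read it as ${{ steps.build-image.outputs.image }}.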
|
|
286 .github/workflows/workers.yaml (vendored)

@@ -6,14 +6,14 @@ on:
|
|||
build_service:
|
||||
description: 'Name of a single service to build (in lowercase). Use "all" to build everything'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- dev
|
||||
paths:
|
||||
- backend/**
|
||||
|
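The workflow_dispatch inputs above can also be exercised manually. A sketch using the GitHub CLI, assuming gh is installed and authenticated against this repository:

# Trigger the workers workflow on the dev branch, building everything
# and keeping the security scan enabled.
gh workflow run workers.yaml --ref dev \
  -f build_service=all \
  -f skip_security_checks=false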
||||
|
|
@@ -25,162 +25,152 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
# ref: staging
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
# ref: staging
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
# - uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# # Ignore the failure of a step and avoid terminating the job.
|
||||
# continue-on-error: true
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
# - uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# # Ignore the failure of a step and avoid terminating the job.
|
||||
# continue-on-error: true
|
||||
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
#
|
||||
# TODO: Check the container tags are same, then skip the build and deployment.
|
||||
#
|
||||
# Build a docker container and push it to Docker Registry so that it can be deployed to Kubernetes cluster.
|
||||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
set -xe
|
||||
touch /tmp/images_to_build.txt
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
tmp_param=${{ github.event.inputs.build_service }}
|
||||
build_param=${tmp_param:-'false'}
|
||||
case ${build_param} in
|
||||
false)
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
#
|
||||
# TODO: Check the container tags are same, then skip the build and deployment.
|
||||
#
|
||||
# Build a docker container and push it to Docker Registry so that it can be deployed to Kubernetes cluster.
|
||||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
set -xe
|
||||
touch /tmp/images_to_build.txt
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
tmp_param=${{ github.event.inputs.build_service }}
|
||||
build_param=${tmp_param:-'false'}
|
||||
case ${build_param} in
|
||||
false)
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
|
||||
;;
|
||||
all)
|
||||
ls backend/cmd > /tmp/images_to_build.txt
|
||||
;;
|
||||
all)
|
||||
ls backend/cmd > /tmp/images_to_build.txt
|
||||
;;
|
||||
*)
|
||||
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
|
||||
;;
|
||||
esac
|
||||
*)
|
||||
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
|
||||
echo "Nothing to build here"
|
||||
touch /tmp/nothing-to-build-here
|
||||
exit 0
|
||||
fi
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
cat /tmp/images_to_build.txt
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=0 bash -x ./build.sh skip $image
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
|
||||
echo "Nothing to build here"
|
||||
touch /tmp/nothing-to-build-here
|
||||
exit 0
|
||||
fi
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
cat /tmp/images_to_build.txt
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=0 bash -x ./build.sh skip $image
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
|
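The scan inside the build loop can be reproduced locally. A minimal sketch, assuming trivy v0.56.2 is already on PATH and the image was built with the same DOCKER_REPO and IMAGE_TAG values as above:

# Fail (exit 1) on unfixed HIGH/CRITICAL OS or library vulnerabilities.
trivy image \
  --db-repository ghcr.io/aquasecurity/trivy-db:2 \
  --exit-code 1 \
  --vuln-type os,library \
  --severity "HIGH,CRITICAL" \
  --ignore-unfixed \
  "$DOCKER_REPO/$image:$IMAGE_TAG"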
||||
- name: Deploying to kubernetes
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
set -x
|
||||
[[ -f /tmp/nothing-to-build-here ]] && exit 0
|
||||
cd scripts/helmcharts/
|
||||
- name: Deploying to kubernetes
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
set -x
|
||||
[[ -f /tmp/nothing-to-build-here ]] && exit 0
|
||||
cd scripts/helmcharts/
|
||||
|
||||
set -x
|
||||
echo > /tmp/image_override.yaml
|
||||
mkdir /tmp/helmcharts
|
||||
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
|
||||
mv openreplay/charts/quickwit /tmp/helmcharts/
|
||||
mv openreplay/charts/connector /tmp/helmcharts/
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
mv openreplay/charts/$image /tmp/helmcharts/
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
${image}:
|
||||
image:
|
||||
# We have to strip off the -ee suffix, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
done
|
||||
ls /tmp/helmcharts
|
||||
rm -rf openreplay/charts/*
|
||||
ls openreplay/charts
|
||||
mv /tmp/helmcharts/* openreplay/charts/
|
||||
ls openreplay/charts
|
||||
## Update secrets
|
||||
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
# Deploy command
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
|
||||
set -x
|
||||
echo > /tmp/image_override.yaml
|
||||
mkdir /tmp/helmcharts
|
||||
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
|
||||
mv openreplay/charts/quickwit /tmp/helmcharts/
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
mv openreplay/charts/$image /tmp/helmcharts/
|
||||
cat <<EOF>>/tmp/image_override.yaml
|
||||
${image}:
|
||||
image:
|
||||
# We have to strip off the -ee suffix, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
done
|
||||
ls /tmp/helmcharts
|
||||
rm -rf openreplay/charts/*
|
||||
ls openreplay/charts
|
||||
mv /tmp/helmcharts/* openreplay/charts/
|
||||
ls openreplay/charts
|
||||
|
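Because the deploy step pipes helm template straight into kubectl apply, a client-side dry run is one way to validate the rendered manifests before the workflow touches the cluster. A hedged sketch (kubectl 1.18+ assumed, run from scripts/helmcharts/):

helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true \
  | kubectl apply --dry-run=client -f -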
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: foss
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
# Deploy command
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: foss
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: 'Build failed :bomb:'
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
#
|
||||
#
|
||||
|
|
|
1 .gitignore (vendored)

@@ -6,4 +6,3 @@ node_modules
|
|||
*.log
|
||||
**/*.envrc
|
||||
.idea
|
||||
*.mob*
|
||||
|
|
|
|||
|
|
@@ -1,7 +0,0 @@
|
|||
repos:
|
||||
- repo: https://github.com/gitguardian/ggshield
|
||||
rev: v1.14.5
|
||||
hooks:
|
||||
- id: ggshield
|
||||
language_version: python3
|
||||
stages: [commit]
|
||||
702 LICENSE

@@ -1,694 +1,64 @@
|
|||
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay
|
||||
Copyright (c) 2022 Asayer, Inc.
|
||||
|
||||
OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
|
||||
- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
|
||||
- All third party components incorporated into the OpenReplay Software are licensed under the original license provided by the owner of the applicable component.
|
||||
- Some directories are licensed under the "MIT" license, as defined below.
|
||||
- Content outside of the above mentioned directories or restrictions defaults to the "GNU Affero General Public License Version 3 (AGPL v3)" license, as defined below.
|
||||
- Some directories have a specific LICENSE file and are licensed under the "MIT" license, as defined below.
|
||||
- Content outside of the above mentioned directories or restrictions defaults to the "Elastic License 2.0 (ELv2)" license, as defined below.
|
||||
|
||||
Reach out (license@openreplay.com) if you have any questions regarding licenses.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
MIT LICENSE
|
||||
------------------------------------------------------------------------------------
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
------------------------------------------------------------------------------------
|
||||
Elastic License 2.0 (ELv2)
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
**Acceptance**
|
||||
By using the software, you agree to all of the terms and conditions below.
|
||||
|
||||
Preamble
|
||||
**Copyright License**
|
||||
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
**Limitations**
|
||||
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
**Patents**
|
||||
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
**Notices**
|
||||
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
**No Other Rights**
|
||||
These terms do not imply any licenses other than those expressly granted in these terms.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
**Termination**
|
||||
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
**No Liability**
|
||||
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
|
||||
|
||||
0. Definitions.
|
||||
**Definitions**
|
||||
The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
*you* refers to the individual or entity agreeing to these terms.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
*your licenses* are all the licenses granted to you for the software under these terms.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
*use* means anything you do with the software requiring one of your licenses.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
*trademark* means trademarks, service marks, and similar rights.
|
||||
|
||||
|
|
|
|||
README.md (40 changed lines) @@ -1,53 +1,40 @@
<p align="center">
|
||||
<a href="/README_FR.md">Français</a>
|
||||
|
|
||||
<a href="/README_ESP.md">Español</a>
|
||||
|
|
||||
<a href="/README_RU.md">Русский</a>
|
||||
|
|
||||
<a href="/README_AR.md">العربية</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://openreplay.com/#gh-light-mode-only">
|
||||
<img src="static/openreplay-git-banner-light.png" width="100%">
|
||||
</a>
|
||||
<a href="https://openreplay.com/#gh-dark-mode-only">
|
||||
<img src="static/openreplay-git-banner-dark.png" width="100%">
|
||||
<a href="https://openreplay.com">
|
||||
<img src="static/logo.svg" height="70">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h3 align="center">Session replay for developers</h3>
|
||||
<p align="center">The most advanced session replay for building delightful web apps.</p>
|
||||
<p align="center">The most advanced open-source session replay to build delightful web apps.</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-aws">
|
||||
<img src="static/btn-deploy-aws.svg" height="40"/>
|
||||
<img src="static/deploy-aws.png" height="35"/>
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-gcp">
|
||||
<img src="static/btn-deploy-google-cloud.svg" height="40" />
|
||||
<img src="static/deploy-gcp.png" height="35" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-azure">
|
||||
<img src="static/btn-deploy-azure.svg" height="40" />
|
||||
<img src="static/deploy-azure.png" height="35" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-digitalocean">
|
||||
<img src="static/btn-deploy-digital-ocean.svg" height="40" />
|
||||
<img src="static/deploy-do.png" height="35" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/openreplay/openreplay">
|
||||
<img src="static/openreplay-git-hero.svg">
|
||||
<img src="static/overview.png">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
OpenReplay is an open-source session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
|
||||
OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket.
|
||||
|
||||
- **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more.
|
||||
- **Low footprint**. With a ~26KB (.br) tracker that asynchronously sends minimal data for a very limited impact on performance.
|
||||
- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance.
|
||||
- **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data.
|
||||
- **Privacy controls**. Fine-grained security features for sanitizing user data.
|
||||
- **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean).
|
||||
|
|
@ -55,11 +42,10 @@ OpenReplay is an open-source session replay suite you can host yourself, that le
|
|||
## Features
|
||||
|
||||
- **Session replay:** Lets you relive your users' experience, see where they struggle and how it affects their behavior. Each session replay is automatically analyzed based on heuristics, for easy triage.
|
||||
- **Spot:** A Chrome extension that lets you record bugs directly from your browser — each recording includes all the technical details developers need to fix them.
|
||||
- **DevTools:** It's like debugging in your own browser. OpenReplay provides you with the full context (network activity, JS errors, store actions/state and 40+ metrics) so you can instantly reproduce bugs and understand performance issues.
|
||||
- **Assist:** Helps you support your users by seeing their live screen and instantly hopping on call (WebRTC) with them without requiring any 3rd-party screen sharing software.
|
||||
- **Omni-search:** Search and filter by almost any user action/criteria, session attribute or technical event, so you can answer any question. No instrumentation required.
|
||||
- **Analytics:** For surfacing the most impactful issues causing conversion and revenue loss.
|
||||
- **Funnels:** For surfacing the most impactful issues causing conversion and revenue loss.
|
||||
- **Fine-grained privacy controls:** Choose what to capture, what to obscure or what to ignore so user data doesn't even reach your servers.
|
||||
- **Plugins oriented:** Get to the root cause even faster by tracking application state (Redux, VueX, MobX, NgRx, Pinia and Zustand) and logging GraphQL queries (Apollo, Relay) and Fetch/Axios requests.
|
||||
- **Integrations:** Sync your backend logs with your session replays and see what happened front-to-back. OpenReplay supports Sentry, Datadog, CloudWatch, Stackdriver, Elastic and more.
|
||||
|
|
@ -98,6 +84,10 @@ See our [Contributing Guide](CONTRIBUTING.md) for more details.
|
|||
|
||||
Also, feel free to join our [Slack](https://slack.openreplay.com) to ask questions, discuss ideas or connect with our contributors.
|
||||
|
||||
## Roadmap
|
||||
|
||||
Check out our [roadmap](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) and keep an eye on what's coming next. You're free to [submit](https://github.com/openreplay/openreplay/issues/new) new ideas and vote on features.
|
||||
|
||||
## License
|
||||
|
||||
This monorepo uses several licenses. See [LICENSE](/LICENSE) for more details.
|
||||
|
|
|
|||
README_AR.md (106 changed lines) @@ -1,106 +0,0 @@
<p align="center">
|
||||
<a href="/README_FR.md">Français</a>
|
||||
|
|
||||
<a href="/README_ESP.md">Español</a>
|
||||
|
|
||||
<a href="/README_RU.md">Русский</a>
|
||||
|
|
||||
<a href="/README.md">English</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://openreplay.com/#gh-light-mode-only">
|
||||
<img src="static/openreplay-git-banner-light.png" width="100%">
|
||||
</a>
|
||||
<a href="https://openreplay.com/#gh-dark-mode-only">
|
||||
<img src="static/openreplay-git-banner-dark.png" width="100%">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h3 align="center">إعادة تشغيل الجلسة للمطورين</h3>
|
||||
<p align="center">إعادة تشغيل الجلسة الأكثر تقدمًا لإنشاء تطبيقات ويب رائعة</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-aws">
|
||||
<img src="static/btn-deploy-aws.svg" height="40"/>
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-gcp">
|
||||
<img src="static/btn-deploy-google-cloud.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-azure">
|
||||
<img src="static/btn-deploy-azure.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-digitalocean">
|
||||
<img src="static/btn-deploy-digital-ocean.svg" height="40" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/openreplay/openreplay">
|
||||
<img src="static/openreplay-git-hero.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
OpenReplay هو مجموعة إعادة تشغيل الجلسة التي يمكنك استضافتها بنفسك، والتي تتيح لك رؤية ما يقوم به المستخدمون على تطبيق الويب الخاص بك، مما يساعدك على حل المشكلات بشكل أسرع.
|
||||
|
||||
- **إعادة تشغيل الجلسة.** يقوم OpenReplay بإعادة تشغيل ما يقوم به المستخدمون، وكيف يتصرف موقع الويب الخاص بك أو التطبيق من خلال التقاط النشاط على الشبكة، وسجلات وحدة التحكم، وأخطاء JavaScript، وإجراءات/حالة التخزين، وقياسات سرعة الصفحة، واستخدام وحدة المعالجة المركزية/الذاكرة، وأكثر من ذلك بكثير.
|
||||
- **بصمة منخفضة**. مع متتبع بحجم حوالي 26 كيلوبايت (نوع .br) الذي يرسل بيانات دقيقة بشكل غير متزامن لتأثير محدود جدًا على الأداء.
|
||||
- **مضيف بواسطتك.** لا مزيد من فحوص الامتثال الأمني، ومعالجة بيانات المستخدمين من قبل جهات خارجية. كل ما يتم التقاطه بواسطة OpenReplay يبقى في سحابتك للتحكم الكامل في بياناتك.
|
||||
- **ضوابط الخصوصية.** ميزات أمان دقيقة لتنقية بيانات المستخدم.
|
||||
- **نشر سهل.** بدعم من مزودي الخدمة السحابية العامة الرئيسيين (AWS، GCP، Azure، DigitalOcean).
|
||||
|
||||
## الميزات
|
||||
|
||||
- **إعادة تشغيل الجلسة:** تتيح لك إعادة تشغيل الجلسة إعادة عيش تجربة مستخدميك، ورؤية أين يواجهون صعوبة وكيف يؤثر ذلك على سلوكهم. يتم تحليل كل إعادة تشغيل للجلسة تلقائيًا بناءً على الأساليب الاستدلالية، لسهولة التقييم.
|
||||
- **أدوات التطوير (DevTools):** إنها مثل التصحيح في متصفحك الخاص. يوفر لك OpenReplay السياق الكامل (نشاط الشبكة، أخطاء JavaScript، إجراءات/حالة التخزين وأكثر من 40 مقياسًا) حتى تتمكن من إعادة إنتاج الأخطاء فورًا وفهم مشكلات الأداء.
|
||||
- **المساعدة (Assist):** تساعدك في دعم مستخدميك من خلال رؤية شاشتهم مباشرة والانضمام فورًا إلى مكالمة (WebRTC) معهم دون الحاجة إلى برامج مشاركة الشاشة من جهات خارجية.
|
||||
- **البحث الشامل (Omni-search):** ابحث وفرز حسب أي عملية/معيار للمستخدم تقريبًا، أو سمة الجلسة أو الحدث التقني، حتى تتمكن من الرد على أي سؤال. لا يلزم تجهيز.
|
||||
- **الأنفاق (Funnels):** للكشف عن المشكلات الأكثر تأثيرًا التي تسبب في فقدان التحويل والإيرادات.
|
||||
- **ضوابط الخصوصية الدقيقة:** اختر ماذا تريد التقاطه، ماذا تريد أن تخفي أو تجاهل حتى لا تصل بيانات المستخدم حتى إلى خوادمك.
|
||||
- **موجهة للمكونات الإضافية (Plugins oriented):** تصل إلى السبب الجذري بشكل أسرع عن طريق تتبع حالة التطبيق (Redux، VueX، MobX، NgRx، Pinia، وZustand) وتسجيل استعلامات GraphQL (Apollo، Relay) وطلبات Fetch/Axios.
|
||||
- **التكاملات (Integrations):** مزامنة سجلات الخادم الخلفي مع إعادات التشغيل للجلسات ورؤية ما حدث من الأمام إلى الخلف. يدعم OpenReplay Sentry وDatadog وCloudWatch وStackdriver وElastic والمزيد.
|
||||
|
||||
## خيارات النشر
|
||||
|
||||
يمكن نشر OpenReplay في أي مكان. اتبع دليلنا الخطوة بالخطوة لنشره على خدمات السحابة العامة الرئيسية:
|
||||
|
||||
- [AWS](https://docs.openreplay.com/deployment/deploy-aws)
|
||||
- [Google Cloud](https://docs.openreplay.com/deployment/deploy-gcp)
|
||||
- [Azure](https://docs.openreplay.com/deployment/deploy-azure)
|
||||
- [Digital Ocean](https://docs.openreplay.com/deployment/deploy-digitalocean)
|
||||
- [Scaleway](https://docs.openreplay.com/deployment/deploy-scaleway)
|
||||
- [OVHcloud](https://docs.openreplay.com/deployment/deploy-ovhcloud)
|
||||
- [Kubernetes](https://docs.openreplay.com/deployment/deploy-kubernetes)
|
||||
|
||||
## سحابة OpenReplay
|
||||
|
||||
لأولئك الذين يرغبون في استخدام OpenReplay كخدمة، [قم بالتسجيل](https://app.openreplay.com/signup) للحصول على حساب مجاني على عرض السحابة لدينا.
|
||||
|
||||
## دعم المجتمع
|
||||
|
||||
يرجى الرجوع إلى [الوثائق الرسمية لـ OpenReplay](https://docs.openreplay.com/). سيساعدك ذلك في حل المشكلات الشائعة. للحصول على مساعدة إضافية، يمكنك الاتصال بنا عبر أحد هذه القنوات:
|
||||
|
||||
- [Slack](https://slack.openreplay.com) (الاتصال مع مهندسينا والمجتمع)
|
||||
- [GitHub](https://github.com/openreplay/openreplay/issues) (تقارير الأخطاء والمشكلات)
|
||||
- [Twitter](https://twitter.com/OpenReplayHQ) (تحديثات المنتج، محتوى رائع)
|
||||
- [YouTube](https://www.youtube.com/channel/UCcnWlW-5wEuuPAwjTR1Ydxw) (دروس حول كيفية الاستخدام، مكالمات مجتمع سابقة)
|
||||
- [دردشة الموقع الإلكتروني](https://openreplay.com) (تحدث معنا)
|
||||
|
||||
## المساهمة
|
||||
|
||||
نحن دائمًا في انتظار المساهمات في OpenReplay، ونحن سعداء بأنك تفكر في ذلك! غير متأكد من أين تبدأ؟ ابحث عن المشاكل المفتوحة، وخاصة تلك المُميزة بأنها مناسبة للمبتدئين.
|
||||
|
||||
انظر دليل المساهمة لدينا [دليل المساهمة](CONTRIBUTING.md) لمزيد من التفاصيل.
|
||||
|
||||
كما توجد حرية الانضمام إلى Slack لدينا [Slack](https://slack.openreplay.com) لطرح الأسئلة، مناقشة الأفكار أو التواصل مع مساهمينا.
|
||||
|
||||
## الخارطة الزمنية
|
||||
|
||||
تحقق من [الخارطة الزمنية لدينا](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) وابق على اطلاع على ما سيأتي لاحقًا. لديك حرية [تقديم أفكار جديدة](https://github.com/openreplay/openreplay/issues/new) والتصويت على الميزات.
|
||||
|
||||
## الترخيص
|
||||
|
||||
يستخدم هذا المستودع المتعدد التراخيص. انظر إلى [LICENSE](/LICENSE) لمزيد من التفاصيل.
|
||||
README_ESP.md (106 changed lines) @@ -1,106 +0,0 @@
<p align="center">
|
||||
<a href="/README_FR.md">Français</a>
|
||||
|
|
||||
<a href="/README.md">English</a>
|
||||
|
|
||||
<a href="/README_RU.md">Русский</a>
|
||||
|
|
||||
<a href="/README_RU.md">العربية</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://openreplay.com/#gh-light-mode-only">
|
||||
<img src="static/openreplay-git-banner-light.png" width="100%">
|
||||
</a>
|
||||
<a href="https://openreplay.com/#gh-dark-mode-only">
|
||||
<img src="static/openreplay-git-banner-dark.png" width="100%">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h3 align="center">Reproducción de sesiones para desarrolladores</h3>
|
||||
<p align="center">La reproducción de sesiones más avanzada para crear aplicaciones web encantadoras.</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-aws">
|
||||
<img src="static/btn-deploy-aws.svg" height="40"/>
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-gcp">
|
||||
<img src="static/btn-deploy-google-cloud.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-azure">
|
||||
<img src="static/btn-deploy-azure.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-digitalocean">
|
||||
<img src="static/btn-deploy-digital-ocean.svg" height="40" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/openreplay/openreplay">
|
||||
<img src="static/openreplay-git-hero.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
OpenReplay es una suite de retransmisión de sesiones que puedes alojar tú mismo, lo que te permite ver lo que hacen los usuarios en tu aplicación web y ayudarte a solucionar problemas más rápido.
|
||||
|
||||
- **Reproducción de sesiones.** OpenReplay reproduce lo que hacen los usuarios, pero no solo eso. También te muestra lo que ocurre bajo el capó, cómo se comporta tu sitio web o aplicación al capturar la actividad de la red, registros de la consola, errores de JavaScript, acciones/estado del almacén, métricas de velocidad de la página, uso de CPU/memoria y mucho más.
|
||||
- **Huella reducida.** Con un rastreador de aproximadamente 26 KB (.br) que envía datos mínimos de forma asíncrona, lo que tiene un impacto muy limitado en el rendimiento.
|
||||
- **Auto-alojado.** No más verificaciones de cumplimiento de seguridad, procesamiento de datos de usuario por terceros. Todo lo que OpenReplay captura se queda en tu nube para un control completo sobre tus datos.
|
||||
- **Controles de privacidad.** Funciones de seguridad detalladas para desinfectar los datos de usuario.
|
||||
- **Despliegue sencillo.** Con el soporte de los principales proveedores de nube pública (AWS, GCP, Azure, DigitalOcean).
|
||||
|
||||
## Características
|
||||
|
||||
- **Reproducción de sesiones:** Te permite revivir la experiencia de tus usuarios, ver dónde encuentran dificultades y cómo afecta su comportamiento. Cada reproducción de sesión se analiza automáticamente en función de heurísticas, para un triaje sencillo.
|
||||
- **Herramientas de desarrollo (DevTools):** Es como depurar en tu propio navegador. OpenReplay te proporciona el contexto completo (actividad de red, errores de JavaScript, acciones/estado del almacén y más de 40 métricas) para que puedas reproducir instantáneamente errores y entender problemas de rendimiento.
|
||||
- **Asistencia (Assist):** Te ayuda a brindar soporte a tus usuarios al ver su pantalla en tiempo real y unirte instantáneamente a una llamada (WebRTC) con ellos, sin necesidad de software de uso compartido de pantalla de terceros.
|
||||
- **Búsqueda universal (Omni-search):** Busca y filtra por casi cualquier acción/criterio de usuario, atributo de sesión o evento técnico, para que puedas responder a cualquier pregunta. No se requiere instrumentación.
|
||||
- **Embudos (Funnels):** Para resaltar los problemas más impactantes que causan la conversión y la pérdida de ingresos.
|
||||
- **Controles de privacidad detallados:** Elige qué capturar, qué ocultar o qué ignorar para que los datos de usuario ni siquiera lleguen a tus servidores.
|
||||
- **Orientado a complementos (Plugins oriented):** Llega más rápido a la causa raíz siguiendo el estado de la aplicación (Redux, VueX, MobX, NgRx, Pinia y Zustand) y registrando consultas GraphQL (Apollo, Relay) y solicitudes Fetch/Axios.
|
||||
- **Integraciones:** Sincroniza tus registros del servidor con tus repeticiones de sesiones y observa lo que sucedió de principio a fin. OpenReplay es compatible con Sentry, Datadog, CloudWatch, Stackdriver, Elastic y más.
|
||||
|
||||
## Opciones de implementación
|
||||
|
||||
OpenReplay se puede implementar en cualquier lugar. Sigue nuestras guías paso a paso para implementarlo en los principales servicios de nube pública:
|
||||
|
||||
- [AWS](https://docs.openreplay.com/deployment/deploy-aws)
|
||||
- [Google Cloud](https://docs.openreplay.com/deployment/deploy-gcp)
|
||||
- [Azure](https://docs.openreplay.com/deployment/deploy-azure)
|
||||
- [Digital Ocean](https://docs.openreplay.com/deployment/deploy-digitalocean)
|
||||
- [Scaleway](https://docs.openreplay.com/deployment/deploy-scaleway)
|
||||
- [OVHcloud](https://docs.openreplay.com/deployment/deploy-ovhcloud)
|
||||
- [Kubernetes](https://docs.openreplay.com/deployment/deploy-kubernetes)
|
||||
|
||||
## OpenReplay Cloud
|
||||
|
||||
Para aquellos que desean usar OpenReplay como un servicio, [regístrate](https://app.openreplay.com/signup) para obtener una cuenta gratuita en nuestra oferta en la nube.
|
||||
|
||||
## Soporte de la comunidad
|
||||
|
||||
Consulta la [documentación oficial de OpenReplay](https://docs.openreplay.com/). Eso debería ayudarte a solucionar problemas comunes. Para obtener ayuda adicional, puedes contactarnos a través de uno de estos canales:
|
||||
|
||||
- [Slack](https://slack.openreplay.com) (Conéctate con nuestros ingenieros y la comunidad)
|
||||
- [GitHub](https://github.com/openreplay/openreplay/issues) (Informes de errores y problemas)
|
||||
- [Twitter](https://twitter.com/OpenReplayHQ) (Actualizaciones del producto, contenido excelente)
|
||||
- [YouTube](https://www.youtube.com/channel/UCcnWlW-5wEuuPAwjTR1Ydxw) (Tutoriales, reuniones comunitarias anteriores)
|
||||
- [Chat en el sitio web](https://openreplay.com) (Háblanos)
|
||||
|
||||
## Contribución
|
||||
|
||||
Siempre estamos buscando contribuciones para OpenReplay, ¡y nos alegra que lo estés considerando! ¿No estás seguro por dónde empezar? Busca problemas abiertos, preferiblemente aquellos marcados como "buenas primeras contribuciones".
|
||||
|
||||
Consulta nuestra [Guía de Contribución](CONTRIBUTING.md) para obtener más detalles.
|
||||
|
||||
Además, no dudes en unirte a nuestro [Slack](https://slack.openreplay.com) para hacer preguntas, discutir ideas o conectarte con nuestros colaboradores.
|
||||
|
||||
## Hoja de ruta
|
||||
|
||||
Consulta nuestra [hoja de ruta](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) y mantente atento a lo que viene a continuación. Eres libre de [enviar](https://github.com/openreplay/openreplay/issues/new) nuevas ideas y votar por funciones.
|
||||
|
||||
## Licencia
|
||||
|
||||
Este monorepo utiliza varias licencias. Consulta [LICENSE](/LICENSE) para obtener más detalles.
|
||||
README_FR.md (106 changed lines) @@ -1,106 +0,0 @@
<p align="center">
|
||||
<a href="/README.md">English</a>
|
||||
|
|
||||
<a href="/README_ESP.md">Español</a>
|
||||
|
|
||||
<a href="/README_RU.md">Русский</a>
|
||||
|
|
||||
<a href="/README_RU.md">العربية</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://openreplay.com/#gh-light-mode-only">
|
||||
<img src="static/openreplay-git-banner-light.png" width="100%">
|
||||
</a>
|
||||
<a href="https://openreplay.com/#gh-dark-mode-only">
|
||||
<img src="static/openreplay-git-banner-dark.png" width="100%">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h3 align="center">Relecture de session pour développeurs</h3>
|
||||
<p align="center">La relecture de session la plus avancée sur le marché pour des applications perfectionnées.</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-aws">
|
||||
<img src="static/btn-deploy-aws.svg" height="40"/>
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-gcp">
|
||||
<img src="static/btn-deploy-google-cloud.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-azure">
|
||||
<img src="static/btn-deploy-azure.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-digitalocean">
|
||||
<img src="static/btn-deploy-digital-ocean.svg" height="40" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/openreplay/openreplay">
|
||||
<img src="static/openreplay-git-hero.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
OpenReplay est une suite d'outils de relecture (appelée aussi "replay") de sessions que vous pouvez héberger vous-même, vous permettant de voir ce que les utilisateurs font sur une application web, vous aidant ainsi à résoudre différents types de problèmes plus rapidement.
|
||||
|
||||
- **Relecture de session.** OpenReplay rejoue ce que les utilisateurs font, mais pas seulement. Il vous montre également ce qui se passe en coulisse, comment votre site web ou votre application se comporte en capturant l'activité réseau, les journaux de console, les erreurs JS, les actions/états du store, les métriques de chargement des pages, l'utilisation du CPU/mémoire, et bien plus encore.
|
||||
- **Faible empreinte**. Avec un traqueur d'environ 26 Ko (.br) qui envoie de manière asynchrone des données minimales, ce qui a un impact très limité sur les performances.
|
||||
- **Auto-hébergé**. Plus de vérifications de conformité en matière de sécurité, plus de traitement des données des utilisateurs par des tiers. Tout ce qu'OpenReplay capture reste dans votre cloud pour un contrôle complet sur vos données.
|
||||
- **Contrôles de confidentialité**. Fonctionnalités de sécurité détaillées pour la désinfection des données utilisateur.
|
||||
- **Déploiement facile**. Avec le support des principaux fournisseurs de cloud public (AWS, GCP, Azure, DigitalOcean).
|
||||
|
||||
## Fonctionnalités
|
||||
|
||||
- **Relecture de session :** Vous permet de revivre l'expérience de vos utilisateurs, de voir où ils rencontrent des problèmes et comment cela affecte leur comportement. Chaque relecture de session est automatiquement analysée en se basant sur des heuristiques, pour un triage plus facile des problèmes en fonction de l'impact.
|
||||
- **Outils de développement (DevTools) :** C'est comme déboguer dans votre propre navigateur. OpenReplay vous fournit le contexte complet (activité réseau, erreurs JS, actions/états du store et plus de 40 métriques) pour que vous puissiez instantanément reproduire les bugs et comprendre les problèmes de performance.
|
||||
- **Assistance (Assist) :** Vous aide à soutenir vos utilisateurs en voyant leur écran en direct et en vous connectant instantanément avec eux via appel/vidéo (WebRTC), sans nécessiter de logiciel tiers de partage d'écran.
|
||||
- **Recherche universelle (Omni-search) :** Recherchez et filtrez presque n'importe quelle action/critère utilisateur, attribut de session ou événement technique, afin de pouvoir répondre à n'importe quelle question. Aucune instrumentation requise.
|
||||
- **Entonnoirs (Funnels) :** Pour mettre en évidence les problèmes les plus impactants entraînant une conversion et une perte de revenus.
|
||||
- **Contrôles de confidentialité détaillés :** Choisissez ce que vous voulez capturer, ce que vous voulez obscurcir ou ignorer, de sorte que les données utilisateur n'atteignent même pas vos serveurs.
|
||||
- **Orienté vers les plugins :** Corrigez plus rapidement les bogues en suivant l'état de l'application (Redux, VueX, MobX, NgRx, Pinia et Zustand) et enregistrant les requêtes GraphQL (Apollo, Relay) et les requêtes Fetch/Axios.
|
||||
- **Intégrations :** Synchronisez vos journaux backend avec vos relectures de sessions et voyez ce qui s'est passé du début à la fin. OpenReplay prend en charge Sentry, Datadog, CloudWatch, Stackdriver, Elastic et bien d'autres.
|
||||
|
||||
## Options de déploiement
|
||||
|
||||
OpenReplay peut être déployé n'importe où. Suivez nos guides détaillés pour le déployer sur les principaux clouds publics :
|
||||
|
||||
- [AWS](https://docs.openreplay.com/deployment/deploy-aws)
|
||||
- [Google Cloud](https://docs.openreplay.com/deployment/deploy-gcp)
|
||||
- [Azure](https://docs.openreplay.com/deployment/deploy-azure)
|
||||
- [Digital Ocean](https://docs.openreplay.com/deployment/deploy-digitalocean)
|
||||
- [Scaleway](https://docs.openreplay.com/deployment/deploy-scaleway)
|
||||
- [OVHcloud](https://docs.openreplay.com/deployment/deploy-ovhcloud)
|
||||
- [Kubernetes](https://docs.openreplay.com/deployment/deploy-kubernetes)
|
||||
|
||||
## OpenReplay Cloud
|
||||
|
||||
Pour ceux qui veulent simplement utiliser OpenReplay en tant que service, [inscrivez-vous](https://app.openreplay.com/signup) pour un compte gratuit sur notre offre cloud.
|
||||
|
||||
## Support de la communauté
|
||||
|
||||
Veuillez vous référer à la [documentation officielle d'OpenReplay](https://docs.openreplay.com/). Cela devrait vous aider à résoudre les problèmes courants. Pour toute aide ou question supplémentaire, vous pouvez nous contacter sur l'un des canaux suivants :
|
||||
|
||||
- [Slack](https://slack.openreplay.com) (Connectez-vous avec nos ingénieurs et notre communauté)
|
||||
- [GitHub](https://github.com/openreplay/openreplay/issues) (Rapports de bogues et problèmes)
|
||||
- [Twitter](https://twitter.com/OpenReplayHQ) (Mises à jour du produit, articles techniques et autres annonces)
|
||||
- [YouTube](https://www.youtube.com/channel/UCcnWlW-5wEuuPAwjTR1Ydxw) (Tutoriels)
|
||||
- [Chat sur le site Web](https://openreplay.com) (Nous contacter)
|
||||
|
||||
## Contribution
|
||||
|
||||
Nous sommes toujours à la recherche de contributions pour rendre OpenReplay meilleur. Vous ne savez pas par où commencer ? Recherchez dans notre "GitHub Issues" pour trouver des tickets ouverts, de préférence ceux marqués comme "bonnes premières contributions".
|
||||
|
||||
Consultez notre [Guide de contribution](CONTRIBUTING.md) pour plus de détails.
|
||||
|
||||
N'hésitez pas à rejoindre notre [Slack](https://slack.openreplay.com) pour poser des questions, discuter vos idées ou simplement pour vous connecter avec nos contributeurs.
|
||||
|
||||
## Feuille de route
|
||||
|
||||
Consultez notre [feuille de route](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) et gardez un œil sur ce qui arrive prochainement. Vous êtes libre de [proposer](https://github.com/openreplay/openreplay/issues/new) de nouvelles idées et de voter pour des fonctionnalités.
|
||||
|
||||
## Licence
|
||||
|
||||
Ce monorepo utilise plusieurs licences. Consultez [LICENSE](/LICENSE) pour plus de détails.
|
||||
README_RU.md (107 changed lines) @@ -1,107 +0,0 @@
<p align="center">
|
||||
<a href="/README_FR.md">Français</a>
|
||||
|
|
||||
<a href="/README_ESP.md">Español</a>
|
||||
|
|
||||
<a href="/README.md">English</a>
|
||||
|
|
||||
<a href="/README_RU.md">العربية</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://openreplay.com/#gh-light-mode-only">
|
||||
<img src="static/openreplay-git-banner-light.png" width="100%">
|
||||
</a>
|
||||
<a href="https://openreplay.com/#gh-dark-mode-only">
|
||||
<img src="static/openreplay-git-banner-dark.png" width="100%">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h3 align="center">Реплей сессий для разработчиков</h3>
|
||||
<p align="center">Самое продвинутое решение для воспроизведения сессий с открытым исходным кодом для создания восхитительных веб-приложений.</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-aws">
|
||||
<img src="static/btn-deploy-aws.svg" height="40"/>
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-gcp">
|
||||
<img src="static/btn-deploy-google-cloud.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-azure">
|
||||
<img src="static/btn-deploy-azure.svg" height="40" />
|
||||
</a>
|
||||
|
||||
<a href="https://docs.openreplay.com/deployment/deploy-digitalocean">
|
||||
<img src="static/btn-deploy-digital-ocean.svg" height="40" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/openreplay/openreplay">
|
||||
<img src="static/openreplay-git-hero.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
OpenReplay - это набор инструментов для воспроизведения пользовательских сессий, позволяющий увидеть действия пользователи в вашем веб-приложении, который вы можете разместить в своем облаке или на серверах.
|
||||
|
||||
- **Воспроизведение сессий.** OpenReplay не только воспроизводит действия пользователей, но и показывает, что происходит под капотом сессии, как ведет себя ваш сайт или приложение, фиксируя сетевую активность, логи консоли, JS-ошибки, действия/состояние стейт менеджеров, показатели скорости страницы, использование процессора/памяти и многое другое.
|
||||
- **Компактность**. Размером всего в ~26 КБ (.br), трекер асинхронно отправляет минимальное количество данных, оказывая очень незначительное влияние на производительность вашего приложения.
|
||||
- **Self-hosted**. Больше никаких проверок на соответствие требованиям безопасности или обработки данных ваших пользователей третьими сторонами. Все, что фиксирует OpenReplay, остается в вашем облаке, что обеспечивает полный контроль над вашими данными.
|
||||
- **Контроль над приватностью**. Тонкие настройки приватности позволяют записывать только действительно необходимые данные.
|
||||
- **Легкая установка**. Мы поддерживаем всех крупных поставщиков облачных услуг (AWS, GCP, Azure, DigitalOcean).
|
||||
|
||||
## Особенности
|
||||
|
||||
- **Session Replay:** Позволяет повторить опыт пользователей, увидеть, где они испытывают трудности и как это влияет на конверсию. Каждый реплей автоматически анализируется на наличие ошибок и аномалий, что значительно облегчает сортировку и поиск проблемных сессий.
|
||||
- **DevTools:** Прямо как отладка в вашем собственном браузере. OpenReplay предоставляет вам полный контекст (сетевая активность, JS ошибки, действия/состояние стейт менеджеров и более 40 метрик), чтобы вы могли мгновенно воспроизвести ошибки и найти проблемы с производительностью.
|
||||
- **Assist:** Позволяет вам помочь вашим пользователям, наблюдая их экран в настоящем времени и мгновенно переходя на звонок (WebRTC) с ними, не требуя стороннего программного обеспечения для совместного просмотра экрана.
|
||||
- **Omni-search:** Поиск и фильтрация практически любого действия пользователя/критерия, атрибута сессии или технического события, чтобы вы могли ответить на любой вопрос.
|
||||
- **Воронки:** Для выявления наиболее влияющих на конверсию мест.
|
||||
- **Тонкая настройка приватности:** Выбирайте, что записывать, а что игнорировать, чтобы данные пользователя даже не отправлялись на ваши сервера.
|
||||
- **Ориентирован на плагины:** С помощью плагинов можно отслеживать состояние приложения (Redux, VueX, MobX, NgRx, Pinia, и Zustand), регистрировать запросы GraphQL (Apollo, Relay) и многое другое.
|
||||
- **Интеграции:** OpenReplay поддерживает интеграции с Sentry, Datadog, CloudWatch, Stackdriver, Elastic и другими провайдерами, позволяя получать еще больше информации о пользовательской сессии.
|
||||
|
||||
## Варианты развертывания
|
||||
|
||||
OpenReplay можно развернуть где угодно. Следуйте нашим пошаговым руководствам по развертыванию на основных публичных облаках:
|
||||
|
||||
- [AWS](https://docs.openreplay.com/deployment/deploy-aws)
|
||||
- [Google Cloud](https://docs.openreplay.com/deployment/deploy-gcp)
|
||||
- [Azure](https://docs.openreplay.com/deployment/deploy-azure)
|
||||
- [Digital Ocean](https://docs.openreplay.com/deployment/deploy-digitalocean)
|
||||
- [Scaleway](https://docs.openreplay.com/deployment/deploy-scaleway)
|
||||
- [OVHcloud](https://docs.openreplay.com/deployment/deploy-ovhcloud)
|
||||
- [Kubernetes](https://docs.openreplay.com/deployment/deploy-kubernetes)
|
||||
|
||||
## OpenReplay Cloud
|
||||
|
||||
Для тех, кто просто хочет использовать OpenReplay как сервис, [зарегистрируйте](https://app.openreplay.com/signup) бесплатную учетную запись в нашем приложении.
|
||||
|
||||
## Поддержка сообщества
|
||||
|
||||
В случае возникновения проблем, вы можете обратиться к [официальной документации OpenReplay](https://docs.openreplay.com/). Это поможет вам решить наиболее распространенные проблемы.
|
||||
Для дополнительной помощи, вы можете связаться с нами через один из этих каналов:
|
||||
|
||||
- [Slack](https://slack.openreplay.com) (Свяжитесь с нашими инженерами и сообществом)
|
||||
- [GitHub](https://github.com/openreplay/openreplay/issues) (Отчеты о багах и проблемах)
|
||||
- [Twitter](https://twitter.com/OpenReplayHQ) (Обновления продукта)
|
||||
- [YouTube](https://www.youtube.com/channel/UCcnWlW-5wEuuPAwjTR1Ydxw) (Учебные пособия, прошлые комьюнити-звонки)
|
||||
- [Чат на веб-сайте](https://openreplay.com) (Общайтесь с нами)
|
||||
|
||||
## Содействие
|
||||
|
||||
Мы всегда рады любой помощи в создании OpenReplay, и готовы услышать ваши идеи. Не уверены, с чего начать? Ищите открытые задачи, особенно те, которые отмечены как "good first issue".
|
||||
|
||||
Смотрите наше [руководство по содействию](CONTRIBUTING.md) для более подробной информации.
|
||||
|
||||
Также не стесняйтесь присоединиться к нашему [Slack](https://slack.openreplay.com), чтобы задавать вопросы, обсуждать идеи или связываться с нашими участниками.
|
||||
|
||||
## План развития
|
||||
|
||||
Ознакомьтесь с нашим [планом развития](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) и следите за тем, что будет далее. Вы можете свободно [предложить](https://github.com/openreplay/openreplay/issues/new) новые идеи и голосовать за функции.
|
||||
|
||||
## Лицензия
|
||||
|
||||
В этом монорепозитории используются разные лицензии. См. [LICENSE](/LICENSE) для получения более подробной информации.
|
||||
api/.gitignore (vendored, 6 changed lines) @@ -83,6 +83,7 @@ wheels/
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
Pipfile
|
||||
Pipfile.lock
|
||||
|
||||
# PyInstaller
|
||||
|
|
@ -143,7 +144,7 @@ celerybeat-schedule
|
|||
|
||||
# Environments
|
||||
.env
|
||||
.venv/*
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
|
|
@ -175,5 +176,4 @@ SUBNETS.json
|
|||
|
||||
./chalicelib/.configs
|
||||
README/*
|
||||
.local
|
||||
/.dev/
|
||||
.local
|
||||
|
|
@ -1 +0,0 @@
|
|||
.venv
|
||||
|
|
@ -1,3 +1,3 @@
|
|||
# Accept the risk until
|
||||
# python setup tools recently fixed. Not yet available in distros.
|
||||
CVE-2023-5363 exp:2023-12-31
|
||||
# python setup tools recently fixed. Not yet avaialable in distros.
|
||||
CVE-2022-40897 exp:2023-02-01
|
||||
|
|
|
|||
|
|
@ -1,31 +1,29 @@
|
|||
FROM python:3.12-alpine AS builder
|
||||
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
|
||||
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
|
||||
|
||||
RUN apk add --no-cache build-base
|
||||
WORKDIR /work
|
||||
COPY requirements.txt ./requirements.txt
|
||||
RUN pip install --no-cache-dir --upgrade uv && \
|
||||
export UV_SYSTEM_PYTHON=true && \
|
||||
uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
|
||||
uv pip install --no-cache-dir --upgrade -r requirements.txt
|
||||
|
||||
FROM python:3.12-alpine
|
||||
FROM python:3.11-alpine
|
||||
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
|
||||
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
|
||||
ARG GIT_SHA
|
||||
LABEL GIT_SHA=$GIT_SHA
|
||||
|
||||
RUN apk add --no-cache build-base tini
|
||||
ARG envarg
|
||||
# Add Tini
|
||||
# Startup daemon
|
||||
ENV SOURCE_MAP_VERSION=0.7.4 \
|
||||
APP_NAME=chalice \
|
||||
LISTEN_PORT=8000 \
|
||||
PRIVATE_ENDPOINTS=false \
|
||||
ENTERPRISE_BUILD=${envarg} \
|
||||
GIT_SHA=$GIT_SHA
|
||||
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
|
||||
COPY --from=builder /usr/local/bin /usr/local/bin
|
||||
APP_NAME=chalice \
|
||||
LISTEN_PORT=8000 \
|
||||
PRIVATE_ENDPOINTS=false \
|
||||
ENTERPRISE_BUILD=${envarg} \
|
||||
GIT_SHA=$GIT_SHA
|
||||
|
||||
WORKDIR /work
|
||||
COPY requirements.txt ./requirements.txt
|
||||
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
||||
|
||||
COPY . .
|
||||
RUN apk add --no-cache tini && mv env.default .env
|
||||
RUN mv env.default .env
|
||||
|
||||
RUN adduser -u 1001 openreplay -D
|
||||
USER 1001
|
||||
|
||||
ENTRYPOINT ["/sbin/tini", "--"]
|
||||
CMD ["./entrypoint.sh"]
|
||||
CMD ./entrypoint.sh
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM python:3.12-alpine
|
||||
FROM python:3.11-alpine
|
||||
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
|
||||
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
|
||||
ARG GIT_SHA
|
||||
|
|
@ -16,9 +16,7 @@ ENV APP_NAME=alerts \
|
|||
|
||||
WORKDIR /work
|
||||
COPY requirements-alerts.txt ./requirements.txt
|
||||
RUN pip install --no-cache-dir --upgrade uv
|
||||
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
|
||||
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
|
||||
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
||||
|
||||
COPY . .
|
||||
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
|
||||
|
|
|
|||
api/NOTES.md (43 changed lines) @@ -1,43 +0,0 @@
#### autogenerated api frontend
|
||||
|
||||
The API can autogenerate a frontend that documents its interface and lets you exercise it, in a limited way. Make sure the current `.env` contains the following variables:
|
||||
|
||||
```
docs_url=/docs
root_path=''
```
|
||||
|
||||
If the `.env` in use is based on `env.default`, that is already the case. Start (or restart) the HTTP server, then go to `https://127.0.0.1:8000/docs`. That is autogenerated documentation based on the pydantic schemas, FastAPI routes, and docstrings :wink:.
|
||||
|
||||
Happy experiments, and then documentation!
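For reference, here is a minimal sketch of how those two variables typically reach FastAPI through `python-decouple`, mirroring the `FastAPI(...)` call that appears in the `api/app.py` diff further down; the defaults shown are taken from that call, everything else is illustrative:

```python
# Sketch: wiring docs_url/root_path from .env into FastAPI via python-decouple.
from decouple import config
from fastapi import FastAPI

app = FastAPI(
    root_path=config("root_path", default="/api"),  # '' in .env serves the app at the bare root
    docs_url=config("docs_url", default=""),        # '/docs' enables the Swagger UI, '' disables it
)
```

With `docs_url=/docs` and `root_path=''` in `.env`, the interactive documentation is served at `/docs`.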
|
||||
|
||||
#### psycopg3 API
|
||||
|
||||
I tend to mix up the psycopg v2 and v3 APIs. For the record, psycopg3's async API looks like the following pseudo code:
|
||||
|
||||
```python
async with app.state.postgresql.connection() as cnx:
    async with cnx.transaction():
        row = await cnx.execute("SELECT EXISTS(SELECT 1 FROM public.tenants)")
        row = await row.fetchone()
        return row["exists"]
```
|
||||
|
||||
Mind the following:

- `app.state.postgresql` is the PostgreSQL connection pool.
- Wrap explicit transactions with `async with cnx.transaction(): ...`.
- Most of the time the transaction object itself is not used.
- Run the awaited `execute` operations against `cnx`.
- `await cnx.execute(...)` returns a cursor object.
- Make the `await cursor.fetchqux...` calls against the object returned by the call to `execute`.
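A minimal sketch of that pattern as a reusable coroutine, assuming a `psycopg_pool.AsyncConnectionPool` whose connections use `row_factory=dict_row` (as the `api/app.py` diff below sets up); the function name is illustrative, not an existing helper:

```python
# Sketch: the async psycopg3 pattern above, wrapped as a coroutine.
import psycopg_pool


async def tenants_exist(pool: psycopg_pool.AsyncConnectionPool) -> bool:
    async with pool.connection() as cnx:  # borrow a pooled connection
        async with cnx.transaction():     # explicit transaction block
            cur = await cnx.execute(      # execute against the connection
                "SELECT EXISTS(SELECT 1 FROM public.tenants)"
            )
            row = await cur.fetchone()    # fetch from the cursor returned by execute()
            return bool(row["exists"])
```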
|
||||
api/Pipfile (29 changed lines) @@ -1,29 +0,0 @@
[[source]]
|
||||
url = "https://pypi.org/simple"
|
||||
verify_ssl = true
|
||||
name = "pypi"
|
||||
|
||||
[packages]
|
||||
urllib3 = "==2.3.0"
|
||||
requests = "==2.32.3"
|
||||
boto3 = "==1.36.12"
|
||||
pyjwt = "==2.10.1"
|
||||
psycopg2-binary = "==2.9.10"
|
||||
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
|
||||
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
|
||||
clickhouse-connect = "==0.8.15"
|
||||
elasticsearch = "==8.17.1"
|
||||
jira = "==3.8.0"
|
||||
cachetools = "==5.5.1"
|
||||
fastapi = "==0.115.8"
|
||||
uvicorn = {extras = ["standard"], version = "==0.34.0"}
|
||||
python-decouple = "==3.8"
|
||||
pydantic = {extras = ["email"], version = "==2.10.6"}
|
||||
apscheduler = "==3.11.0"
|
||||
redis = "==5.2.1"
|
||||
|
||||
[dev-packages]
|
||||
|
||||
[requires]
|
||||
python_version = "3.12"
|
||||
python_full_version = "3.12.8"
|
||||
api/app.py (133 changed lines) @@ -1,99 +1,33 @@
import logging
|
||||
import time
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
import psycopg_pool
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from decouple import config
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.middleware.gzip import GZipMiddleware
|
||||
from psycopg import AsyncConnection
|
||||
from psycopg.rows import dict_row
|
||||
from starlette.responses import StreamingResponse
|
||||
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client, ch_client
|
||||
from crons import core_crons, core_dynamic_crons
|
||||
from chalicelib.utils import pg_client
|
||||
from routers import core, core_dynamic
|
||||
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
|
||||
from routers.crons import core_crons
|
||||
from routers.crons import core_dynamic_crons
|
||||
from routers.subs import insights, metrics, v1_api
|
||||
|
||||
loglevel = config("LOGLEVEL", default=logging.WARNING)
|
||||
print(f">Loglevel set to: {loglevel}")
|
||||
logging.basicConfig(level=loglevel)
|
||||
|
||||
|
||||
class ORPYAsyncConnection(AsyncConnection):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, row_factory=dict_row, **kwargs)
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
# Startup
|
||||
logging.info(">>>>> starting up <<<<<")
|
||||
ap_logger = logging.getLogger('apscheduler')
|
||||
ap_logger.setLevel(loglevel)
|
||||
|
||||
app.schedule = AsyncIOScheduler()
|
||||
await pg_client.init()
|
||||
await ch_client.init()
|
||||
app.schedule.start()
|
||||
|
||||
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
|
||||
app.schedule.add_job(id=job["func"].__name__, **job)
|
||||
|
||||
ap_logger.info(">Scheduled jobs:")
|
||||
for job in app.schedule.get_jobs():
|
||||
ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
|
||||
|
||||
database = {
|
||||
"host": config("pg_host", default="localhost"),
|
||||
"dbname": config("pg_dbname", default="orpy"),
|
||||
"user": config("pg_user", default="orpy"),
|
||||
"password": config("pg_password", default="orpy"),
|
||||
"port": config("pg_port", cast=int, default=5432),
|
||||
"application_name": "AIO" + config("APP_NAME", default="PY"),
|
||||
}
|
||||
|
||||
database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection,
|
||||
min_size=config("PG_AIO_MINCONN", cast=int, default=1),
|
||||
max_size=config("PG_AIO_MAXCONN", cast=int, default=5), )
|
||||
app.state.postgresql = database
|
||||
|
||||
# App listening
|
||||
yield
|
||||
|
||||
# Shutdown
|
||||
await database.close()
|
||||
logging.info(">>>>> shutting down <<<<<")
|
||||
app.schedule.shutdown(wait=False)
|
||||
await pg_client.terminate()
|
||||
|
||||
|
||||
app = FastAPI(root_path=config("root_path", default="/api"), docs_url=config("docs_url", default=""),
|
||||
redoc_url=config("redoc_url", default=""), lifespan=lifespan)
|
||||
app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
|
||||
app.add_middleware(GZipMiddleware, minimum_size=1000)
|
||||
|
||||
|
||||
@app.middleware('http')
|
||||
async def or_middleware(request: Request, call_next):
|
||||
if helper.TRACK_TIME:
|
||||
now = time.time()
|
||||
try:
|
||||
response: StreamingResponse = await call_next(request)
|
||||
except:
|
||||
logging.error(f"{request.method}: {request.url.path} FAILED!")
|
||||
raise
|
||||
if response.status_code // 100 != 2:
|
||||
logging.warning(f"{request.method}:{request.url.path} {response.status_code}!")
|
||||
import time
|
||||
now = int(time.time() * 1000)
|
||||
response: StreamingResponse = await call_next(request)
|
||||
if helper.TRACK_TIME:
|
||||
now = time.time() - now
|
||||
if now > 2:
|
||||
now = round(now, 2)
|
||||
logging.warning(f"Execution time: {now} s for {request.method}: {request.url.path}")
|
||||
response.headers["x-robots-tag"] = 'noindex, nofollow'
|
||||
now = int(time.time() * 1000) - now
|
||||
if now > 500:
|
||||
logging.info(f"Execution time: {now} ms")
|
||||
return response
|
||||
|
||||
|
||||
|
|
@ -117,18 +51,39 @@ app.include_router(core_dynamic.app_apikey)
|
|||
app.include_router(metrics.app)
|
||||
app.include_router(insights.app)
|
||||
app.include_router(v1_api.app_apikey)
|
||||
app.include_router(health.public_app)
|
||||
app.include_router(health.app)
|
||||
app.include_router(health.app_apikey)
|
||||
|
||||
app.include_router(usability_tests.public_app)
|
||||
app.include_router(usability_tests.app)
|
||||
app.include_router(usability_tests.app_apikey)
|
||||
loglevel = config("LOGLEVEL", default=logging.INFO)
|
||||
print(f">Loglevel set to: {loglevel}")
|
||||
logging.basicConfig(level=loglevel)
|
||||
ap_logger = logging.getLogger('apscheduler')
|
||||
ap_logger.setLevel(loglevel)
|
||||
app.schedule = AsyncIOScheduler()
|
||||
|
||||
app.include_router(spot.public_app)
|
||||
app.include_router(spot.app)
|
||||
app.include_router(spot.app_apikey)
|
||||
|
||||
app.include_router(product_anaytics.public_app)
|
||||
app.include_router(product_anaytics.app)
|
||||
app.include_router(product_anaytics.app_apikey)
|
||||
@app.on_event("startup")
|
||||
async def startup():
|
||||
logging.info(">>>>> starting up <<<<<")
|
||||
await pg_client.init()
|
||||
app.schedule.start()
|
||||
|
||||
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
|
||||
app.schedule.add_job(id=job["func"].__name__, **job)
|
||||
|
||||
ap_logger.info(">Scheduled jobs:")
|
||||
for job in app.schedule.get_jobs():
|
||||
ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
|
||||
|
||||
|
||||
@app.on_event("shutdown")
|
||||
async def shutdown():
|
||||
logging.info(">>>>> shutting down <<<<<")
|
||||
app.schedule.shutdown(wait=False)
|
||||
await pg_client.terminate()
|
||||
|
||||
|
||||
@app.get('/private/shutdown', tags=["private"])
|
||||
async def stop_server():
|
||||
logging.info("Requested shutdown")
|
||||
await shutdown()
|
||||
import os, signal
|
||||
os.kill(1, signal.SIGTERM)
|
||||
|
|
|
|||
|
|
@ -1,18 +1,34 @@
|
|||
import logging
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from decouple import config
|
||||
from fastapi import FastAPI
|
||||
|
||||
from chalicelib.core.alerts import alerts_processor
|
||||
from chalicelib.utils import pg_client
|
||||
|
||||
from chalicelib.core import alerts_processor
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
# Startup
|
||||
ap_logger.info(">>>>> starting up <<<<<")
|
||||
app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
|
||||
logging.info("============= ALERTS =============")
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
return {"status": "Running"}
|
||||
|
||||
|
||||
app.schedule = AsyncIOScheduler()
|
||||
|
||||
loglevel = config("LOGLEVEL", default=logging.INFO)
|
||||
print(f">Loglevel set to: {loglevel}")
|
||||
logging.basicConfig(level=loglevel)
|
||||
ap_logger = logging.getLogger('apscheduler')
|
||||
ap_logger.setLevel(loglevel)
|
||||
app.schedule = AsyncIOScheduler()
|
||||
|
||||
|
||||
@app.on_event("startup")
|
||||
async def startup():
|
||||
logging.info(">>>>> starting up <<<<<")
|
||||
await pg_client.init()
|
||||
app.schedule.start()
|
||||
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
|
||||
|
|
@ -23,43 +39,24 @@ async def lifespan(app: FastAPI):
|
|||
for job in app.schedule.get_jobs():
|
||||
ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
|
||||
|
||||
# App listening
|
||||
yield
|
||||
|
||||
# Shutdown
|
||||
ap_logger.info(">>>>> shutting down <<<<<")
|
||||
@app.on_event("shutdown")
|
||||
async def shutdown():
|
||||
logging.info(">>>>> shutting down <<<<<")
|
||||
app.schedule.shutdown(wait=False)
|
||||
await pg_client.terminate()
|
||||
|
||||
|
||||
loglevel = config("LOGLEVEL", default=logging.INFO)
|
||||
print(f">Loglevel set to: {loglevel}")
|
||||
logging.basicConfig(level=loglevel)
|
||||
ap_logger = logging.getLogger('apscheduler')
|
||||
ap_logger.setLevel(loglevel)
|
||||
|
||||
app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
|
||||
redoc_url=config("redoc_url", default=""), lifespan=lifespan)
|
||||
|
||||
app.schedule = AsyncIOScheduler()
|
||||
ap_logger.info("============= ALERTS =============")
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
return {"status": "Running"}
|
||||
|
||||
|
||||
@app.get("/health")
|
||||
async def get_health_status():
|
||||
return {"data": {
|
||||
"health": True,
|
||||
"details": {"version": config("version_number", default="unknown")}
|
||||
}}
|
||||
@app.get('/private/shutdown', tags=["private"])
|
||||
async def stop_server():
|
||||
logging.info("Requested shutdown")
|
||||
await shutdown()
|
||||
import os, signal
|
||||
os.kill(1, signal.SIGTERM)
|
||||
|
||||
|
||||
if config("LOCAL_DEV", default=False, cast=bool):
|
||||
@app.get('/trigger', tags=["private"])
|
||||
@app.get('/private/trigger', tags=["private"])
|
||||
async def trigger_main_cron():
|
||||
ap_logger.info("Triggering main cron")
|
||||
logging.info("Triggering main cron")
|
||||
alerts_processor.process()
|
||||
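Note on the hunk above: the rewritten alerts entry point replaces the deprecated `@app.on_event("startup")` / `@app.on_event("shutdown")` hooks with a single `lifespan` context manager passed to `FastAPI(...)`. A minimal, self-contained sketch of that pattern follows; the `process` job, interval and `root_path` are illustrative placeholders, not the exact OpenReplay wiring.

```python
import logging
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI

logger = logging.getLogger(__name__)


def process():
    # Placeholder for the periodic job (e.g. alerts_processor.process).
    logger.info("processing alerts")


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create the scheduler, register the recurring job, start it.
    app.schedule = AsyncIOScheduler()
    app.schedule.add_job(process, trigger="interval", minutes=1, id="alerts_processor")
    app.schedule.start()
    yield  # application serves requests here
    # Shutdown: stop the scheduler without waiting for running jobs.
    app.schedule.shutdown(wait=False)


app = FastAPI(root_path="/alerts", lifespan=lifespan)
```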
@@ -1,4 +1,3 @@
import logging
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import Request
|
||||
@@ -9,8 +8,6 @@ from starlette.exceptions import HTTPException
from chalicelib.core import authorizers
|
||||
from schemas import CurrentAPIContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class APIKeyAuth(APIKeyHeader):
|
||||
def __init__(self, auto_error: bool = True):
|
||||
@@ -25,7 +22,7 @@ class APIKeyAuth(APIKeyHeader):
detail="Invalid API Key",
|
||||
)
|
||||
r["authorizer_identity"] = "api_key"
|
||||
logger.debug(r)
|
||||
print(r)
|
||||
request.state.authorizer_identity = "api_key"
|
||||
request.state.currentContext = CurrentAPIContext(tenantId=r["tenantId"])
|
||||
request.state.currentContext = CurrentAPIContext(tenant_id=r["tenantId"])
|
||||
return request.state.currentContext
|
||||
@@ -1,30 +1,12 @@
import datetime
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from decouple import config
|
||||
from fastapi import Request
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
from starlette import status
|
||||
from starlette.exceptions import HTTPException
|
||||
|
||||
from chalicelib.core import authorizers, users
|
||||
import schemas
|
||||
from chalicelib.core import authorizers, users, spot
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_current_auth_context(request: Request, jwt_payload: dict) -> schemas.CurrentContext:
|
||||
user = users.get(user_id=jwt_payload.get("userId", -1), tenant_id=jwt_payload.get("tenantId", -1))
|
||||
if user is None:
|
||||
logger.warning("User not found.")
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User not found.")
|
||||
request.state.authorizer_identity = "jwt"
|
||||
request.state.currentContext = schemas.CurrentContext(tenantId=jwt_payload.get("tenantId", -1),
|
||||
userId=jwt_payload.get("userId", -1),
|
||||
email=user["email"],
|
||||
role=user["role"])
|
||||
return request.state.currentContext
|
||||
|
||||
|
||||
class JWTAuth(HTTPBearer):
|
||||
@@ -32,122 +14,46 @@ class JWTAuth(HTTPBearer):
super(JWTAuth, self).__init__(auto_error=auto_error)
|
||||
|
||||
async def __call__(self, request: Request) -> Optional[schemas.CurrentContext]:
|
||||
if request.url.path in ["/refresh", "/api/refresh"]:
|
||||
return await self.__process_refresh_call(request)
|
||||
|
||||
elif request.url.path in ["/spot/refresh", "/api/spot/refresh"]:
|
||||
return await self.__process_spot_refresh_call(request)
|
||||
|
||||
else:
|
||||
credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
|
||||
if credentials:
|
||||
if not credentials.scheme == "Bearer":
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Invalid authentication scheme.")
|
||||
jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials)
|
||||
auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1),
|
||||
jwt_iat=jwt_payload.get("iat", 100))
|
||||
if jwt_payload is None \
|
||||
or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
|
||||
or not auth_exists:
|
||||
if jwt_payload is not None:
|
||||
logger.debug(jwt_payload)
|
||||
if jwt_payload.get("iat") is None:
|
||||
logger.debug("iat is None")
|
||||
if jwt_payload.get("aud") is None:
|
||||
logger.debug("aud is None")
|
||||
if not auth_exists:
|
||||
logger.warning("not users.auth_exists")
|
||||
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")
|
||||
|
||||
if jwt_payload.get("aud", "").startswith("spot") and not request.url.path.startswith("/spot"):
|
||||
# Allow access to spot endpoints only
|
||||
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Unauthorized access (spot).")
|
||||
elif jwt_payload.get("aud", "").startswith("front") and request.url.path.startswith("/spot"):
|
||||
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Unauthorized access endpoint reserved for Spot only.")
|
||||
|
||||
return _get_current_auth_context(request=request, jwt_payload=jwt_payload)
|
||||
|
||||
logger.warning("Invalid authorization code.")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code.")
|
||||
|
||||
async def __process_refresh_call(self, request: Request) -> schemas.CurrentContext:
|
||||
if "refreshToken" not in request.cookies:
|
||||
logger.warning("Missing refreshToken cookie.")
|
||||
jwt_payload = None
|
||||
else:
|
||||
jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["refreshToken"])
|
||||
|
||||
if jwt_payload is None or jwt_payload.get("jti") is None:
|
||||
logger.warning("Null refreshToken's payload, or null JTI.")
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Invalid refresh-token or expired refresh-token.")
|
||||
auth_exists = users.refresh_auth_exists(user_id=jwt_payload.get("userId", -1),
|
||||
jwt_jti=jwt_payload["jti"])
|
||||
if not auth_exists:
|
||||
logger.warning("refreshToken's user not found.")
|
||||
logger.warning(jwt_payload)
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Invalid refresh-token or expired refresh-token.")
|
||||
|
||||
credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
|
||||
if credentials:
|
||||
if not credentials.scheme == "Bearer":
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Invalid authentication scheme.")
|
||||
old_jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials,
|
||||
leeway=datetime.timedelta(
|
||||
days=config("JWT_LEEWAY_DAYS", cast=int, default=3)
|
||||
))
|
||||
if old_jwt_payload is None \
|
||||
or old_jwt_payload.get("userId") is None \
|
||||
or old_jwt_payload.get("userId") != jwt_payload.get("userId"):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authentication scheme.")
|
||||
jwt_payload = authorizers.jwt_authorizer(credentials.scheme + " " + credentials.credentials)
|
||||
auth_exists = jwt_payload is not None \
|
||||
and users.auth_exists(user_id=jwt_payload.get("userId", -1),
|
||||
tenant_id=jwt_payload.get("tenantId", -1),
|
||||
jwt_iat=jwt_payload.get("iat", 100),
|
||||
jwt_aud=jwt_payload.get("aud", ""))
|
||||
if jwt_payload is None \
|
||||
or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
|
||||
or not auth_exists:
|
||||
print("JWTAuth: Token issue")
|
||||
if jwt_payload is not None:
|
||||
print(jwt_payload)
|
||||
print(f"JWTAuth: user_id={jwt_payload.get('userId')} tenant_id={jwt_payload.get('tenantId')}")
|
||||
if jwt_payload is None:
|
||||
print("JWTAuth: jwt_payload is None")
|
||||
print(credentials.scheme + " " + credentials.credentials)
|
||||
if jwt_payload is not None and jwt_payload.get("iat") is None:
|
||||
print("JWTAuth: iat is None")
|
||||
if jwt_payload is not None and jwt_payload.get("aud") is None:
|
||||
print("JWTAuth: aud is None")
|
||||
if jwt_payload is not None and not auth_exists:
|
||||
print("JWTAuth: not users.auth_exists")
|
||||
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")
|
||||
user = users.get(user_id=jwt_payload.get("userId", -1), tenant_id=jwt_payload.get("tenantId", -1))
|
||||
if user is None:
|
||||
print("JWTAuth: User not found.")
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User not found.")
|
||||
jwt_payload["authorizer_identity"] = "jwt"
|
||||
print(jwt_payload)
|
||||
request.state.authorizer_identity = "jwt"
|
||||
request.state.currentContext = schemas.CurrentContext(tenant_id=jwt_payload.get("tenantId", -1),
|
||||
user_id=jwt_payload.get("userId", -1),
|
||||
email=user["email"])
|
||||
return request.state.currentContext
|
||||
|
||||
return _get_current_auth_context(request=request, jwt_payload=jwt_payload)
|
||||
|
||||
logger.warning("Invalid authorization code (refresh logic).")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code for refresh.")
|
||||
|
||||
async def __process_spot_refresh_call(self, request: Request) -> schemas.CurrentContext:
|
||||
if "spotRefreshToken" not in request.cookies:
|
||||
logger.warning("Missing soptRefreshToken cookie.")
|
||||
jwt_payload = None
|
||||
else:
|
||||
jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["spotRefreshToken"])
|
||||
|
||||
if jwt_payload is None or jwt_payload.get("jti") is None:
|
||||
logger.warning("Null spotRefreshToken's payload, or null JTI.")
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Invalid spotRefreshToken or expired refresh-token.")
|
||||
auth_exists = spot.refresh_auth_exists(user_id=jwt_payload.get("userId", -1),
|
||||
jwt_jti=jwt_payload["jti"])
|
||||
if not auth_exists:
|
||||
logger.warning("spotRefreshToken's user not found.")
|
||||
logger.warning(jwt_payload)
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Invalid spotRefreshToken or expired refresh-token.")
|
||||
|
||||
credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
|
||||
if credentials:
|
||||
if not credentials.scheme == "Bearer":
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Invalid spot-authentication scheme.")
|
||||
old_jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials,
|
||||
leeway=datetime.timedelta(
|
||||
days=config("JWT_LEEWAY_DAYS", cast=int, default=3)
|
||||
))
|
||||
if old_jwt_payload is None \
|
||||
or old_jwt_payload.get("userId") is None \
|
||||
or old_jwt_payload.get("userId") != jwt_payload.get("userId"):
|
||||
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Invalid spot-token or expired token.")
|
||||
|
||||
return _get_current_auth_context(request=request, jwt_payload=jwt_payload)
|
||||
|
||||
logger.warning("Invalid authorization code (spot-refresh logic).")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Invalid authorization code for spot-refresh.")
|
||||
print("JWTAuth: Invalid authorization code.")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code.")
|
||||
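The `__process_refresh_call` logic above accepts an access token that has already expired, within a leeway window controlled by `JWT_LEEWAY_DAYS`, as long as the refresh token itself is valid. A hedged sketch of that leeway check using PyJWT; the secret, algorithm and audience handling here are illustrative assumptions, not the project's `authorizers` module.

```python
import datetime
from typing import Optional

import jwt  # PyJWT

SECRET = "change-me"  # illustrative only; the real key comes from configuration


def decode_with_leeway(token: str, leeway_days: int = 3) -> Optional[dict]:
    # Accept a token that expired up to `leeway_days` ago, mirroring the
    # JWT_LEEWAY_DAYS handling used when the refresh endpoint re-reads the
    # old access token.
    try:
        return jwt.decode(
            token,
            SECRET,
            algorithms=["HS256"],
            leeway=datetime.timedelta(days=leeway_days),
            options={"verify_aud": False},  # audience checks elided in this sketch
        )
    except jwt.InvalidTokenError:
        return None
```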
@@ -1,5 +1,3 @@
import logging
|
||||
|
||||
from fastapi import Request
|
||||
from starlette import status
|
||||
from starlette.exceptions import HTTPException
|
||||
@@ -8,8 +6,6 @@ import schemas
from chalicelib.core import projects
|
||||
from or_dependencies import OR_context
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ProjectAuthorizer:
|
||||
def __init__(self, project_identifier):
|
||||
@@ -20,19 +16,11 @@ class ProjectAuthorizer:
return
|
||||
current_user: schemas.CurrentContext = await OR_context(request)
|
||||
value = request.path_params[self.project_identifier]
|
||||
current_project = None
|
||||
if self.project_identifier == "projectId" \
|
||||
and (isinstance(value, int) or isinstance(value, str) and value.isnumeric()):
|
||||
current_project = projects.get_project(project_id=value, tenant_id=current_user.tenant_id)
|
||||
elif self.project_identifier == "projectKey":
|
||||
current_project = projects.get_by_project_key(project_key=value)
|
||||
|
||||
if current_project is None:
|
||||
logger.debug(f"unauthorized project {self.project_identifier}:{value}")
|
||||
if (self.project_identifier == "projectId" \
|
||||
and (not (isinstance(value, int) or isinstance(value, str) and value.isnumeric())
|
||||
or projects.get_project(project_id=value, tenant_id=current_user.tenant_id) is None)) \
|
||||
or (self.project_identifier == "projectKey" \
|
||||
and projects.get_internal_project_id(project_key=value) is None):
|
||||
print("project not found")
|
||||
print(value)
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
|
||||
else:
|
||||
current_project = schemas.ProjectContext(projectId=current_project["projectId"],
|
||||
projectKey=current_project["projectKey"],
|
||||
platform=current_project["platform"],
|
||||
name=current_project["name"])
|
||||
request.state.currentContext.project = current_project
|
||||
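The simplified `ProjectAuthorizer` above resolves the project once per request and stores a `ProjectContext` on `request.state.currentContext`. It is typically attached as a FastAPI dependency on routes that carry a `projectId` or `projectKey` path parameter; a hedged usage sketch follows, where the import path and the route are illustrative assumptions.

```python
from fastapi import APIRouter, Depends

from or_dependencies import ProjectAuthorizer  # assumed import path for this sketch

# Every route on this router that declares {projectId} is validated by the
# authorizer before the handler runs; unknown projects yield a 404.
router = APIRouter(dependencies=[Depends(ProjectAuthorizer("projectId"))])


@router.get("/{projectId}/alerts")
async def list_alerts(projectId: int):
    ...
```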
62
api/build.sh
@@ -9,20 +9,16 @@
|
||||
# Helper function
|
||||
exit_err() {
|
||||
err_code=$1
|
||||
if [[ $err_code != 0 ]]; then
|
||||
exit $err_code
|
||||
fi
|
||||
err_code=$1
|
||||
if [[ err_code != 0 ]]; then
|
||||
exit $err_code
|
||||
fi
|
||||
}
|
||||
|
||||
source ../scripts/lib/_docker.sh
|
||||
ARCH=${ARCH:-'amd64'}
|
||||
|
||||
environment=$1
|
||||
git_sha=$(git rev-parse --short HEAD)
|
||||
image_tag=${IMAGE_TAG:-git_sha}
|
||||
envarg="default-foss"
|
||||
chart="chalice"
|
||||
check_prereq() {
|
||||
which docker || {
|
||||
echo "Docker not installed, please install docker."
|
||||
@@ -31,36 +27,13 @@ check_prereq() {
return
|
||||
}
|
||||
|
||||
[[ $1 == ee ]] && ee=true
|
||||
[[ $PATCH -eq 1 ]] && {
|
||||
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
|
||||
[[ $ee == "true" ]] && {
|
||||
image_tag="${image_tag}-ee"
|
||||
}
|
||||
}
|
||||
update_helm_release() {
|
||||
[[ $ee == "true" ]] && return
|
||||
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
|
||||
# Update the chart version
|
||||
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
# Update image tags
|
||||
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
# Commit the changes
|
||||
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
git commit -m "chore(helm): Updating $chart image release"
|
||||
}
|
||||
|
||||
function build_api() {
|
||||
function build_api(){
|
||||
destination="_api"
|
||||
[[ $1 == "ee" ]] && {
|
||||
destination="_api_ee"
|
||||
}
|
||||
[[ -d ../${destination} ]] && {
|
||||
echo "Removing previous build cache"
|
||||
rm -rf ../${destination}
|
||||
}
|
||||
cp -R ../api ../${destination}
|
||||
cd ../${destination} || exit_err 100
|
||||
cd ../${destination}
|
||||
tag=""
|
||||
# Copy enterprise code
|
||||
[[ $1 == "ee" ]] && {
|
||||
@@ -69,16 +42,16 @@ function build_api() {
tag="ee-"
|
||||
}
|
||||
mv Dockerfile.dockerignore .dockerignore
|
||||
docker build -f ./Dockerfile --platform linux/${ARCH} --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} .
|
||||
cd ../api || exit_err 100
|
||||
docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/chalice:${image_tag} .
|
||||
cd ../api
|
||||
rm -rf ../${destination}
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
docker push ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag}
|
||||
docker tag ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
|
||||
docker push ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${tag}latest
|
||||
docker push ${DOCKER_REPO:-'local'}/chalice:${image_tag}
|
||||
docker tag ${DOCKER_REPO:-'local'}/chalice:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
|
||||
docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
|
||||
}
|
||||
[[ $SIGN_IMAGE -eq 1 ]] && {
|
||||
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag}
|
||||
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/chalice:${image_tag}
|
||||
}
|
||||
echo "api docker build completed"
|
||||
}
|
||||
@@ -86,6 +59,11 @@ function build_api() {
check_prereq
|
||||
build_api $environment
|
||||
echo buil_complete
|
||||
if [[ $PATCH -eq 1 ]]; then
|
||||
update_helm_release
|
||||
fi
|
||||
#IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO SIGN_IMAGE=$SIGN_IMAGE SIGN_KEY=$SIGN_KEY bash build_alerts.sh $1
|
||||
#
|
||||
#[[ $environment == "ee" ]] && {
|
||||
# cp ../ee/api/build_crons.sh .
|
||||
# IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO SIGN_IMAGE=$SIGN_IMAGE SIGN_KEY=$SIGN_KEY bash build_crons.sh $1
|
||||
# exit_err $?
|
||||
# rm build_crons.sh
|
||||
#} || true
|
||||
@@ -10,7 +10,6 @@
git_sha=$(git rev-parse --short HEAD)
|
||||
image_tag=${IMAGE_TAG:-git_sha}
|
||||
envarg="default-foss"
|
||||
source ../scripts/lib/_docker.sh
|
||||
check_prereq() {
|
||||
which docker || {
|
||||
echo "Docker not installed, please install docker."
|
||||
@@ -18,26 +17,7 @@ check_prereq() {
}
|
||||
}
|
||||
|
||||
[[ $1 == ee ]] && ee=true
|
||||
[[ $PATCH -eq 1 ]] && {
|
||||
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
|
||||
[[ $ee == "true" ]] && {
|
||||
image_tag="${image_tag}-ee"
|
||||
}
|
||||
}
|
||||
update_helm_release() {
|
||||
chart=$1
|
||||
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
|
||||
# Update the chart version
|
||||
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
# Update image tags
|
||||
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
# Commit the changes
|
||||
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
|
||||
git commit -m "chore(helm): Updating $chart image release"
|
||||
}
|
||||
|
||||
function build_alerts() {
|
||||
function build_alerts(){
|
||||
destination="_alerts"
|
||||
[[ $1 == "ee" ]] && {
|
||||
destination="_alerts_ee"
|
||||
@@ -52,7 +32,7 @@ function build_alerts() {
tag="ee-"
|
||||
}
|
||||
mv Dockerfile_alerts.dockerignore .dockerignore
|
||||
docker build -f ./Dockerfile_alerts --platform linux/${ARCH:-"amd64"} --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
|
||||
docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
|
||||
cd ../api
|
||||
rm -rf ../${destination}
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
@@ -68,6 +48,3 @@ function build_alerts() {
|
||||
check_prereq
|
||||
build_alerts $1
|
||||
if [[ $PATCH -eq 1 ]]; then
|
||||
update_helm_release alerts
|
||||
fi
|
||||
@@ -9,7 +9,6 @@
|
||||
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
|
||||
envarg="default-foss"
|
||||
source ../scripts/lib/_docker.sh
|
||||
check_prereq() {
|
||||
which docker || {
|
||||
echo "Docker not installed, please install docker."
|
||||
@@ -18,7 +17,7 @@ check_prereq() {
[[ exit -eq 1 ]] && exit 1
|
||||
}
|
||||
|
||||
function build_crons() {
|
||||
function build_crons(){
|
||||
destination="_crons_ee"
|
||||
cp -R ../api ../${destination}
|
||||
cd ../${destination}
|
||||
@@ -29,7 +28,7 @@ function build_crons() {
envarg="default-ee"
|
||||
tag="ee-"
|
||||
mv Dockerfile_crons.dockerignore .dockerignore
|
||||
docker build -f ./Dockerfile_crons --platform=linux/${ARCH:-'amd64'} --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
|
||||
docker build -f ./Dockerfile_crons --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
|
||||
cd ../api
|
||||
rm -rf ../${destination}
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
@@ -46,7 +45,4 @@ function build_crons() {
check_prereq
|
||||
[[ $1 == "ee" ]] && {
|
||||
build_crons $1
|
||||
} || {
|
||||
echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
|
||||
exit 100
|
||||
}
|
||||
@@ -0,0 +1 @@
from . import sessions as sessions_legacy
|
||||
236
api/chalicelib/core/alerts.py
Normal file
@@ -0,0 +1,236 @@
import json
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import notifications, webhook
|
||||
from chalicelib.core.collaboration_msteams import MSTeams
|
||||
from chalicelib.core.collaboration_slack import Slack
|
||||
from chalicelib.utils import pg_client, helper, email_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def get(id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT *
|
||||
FROM public.alerts
|
||||
WHERE alert_id =%(id)s;""",
|
||||
{"id": id})
|
||||
)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return helper.custom_alert_to_front(__process_circular(a))
|
||||
|
||||
|
||||
def get_all(project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""\
|
||||
SELECT alerts.*,
|
||||
COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
|
||||
query ->> 'left') AS series_name
|
||||
FROM public.alerts
|
||||
LEFT JOIN metric_series USING (series_id)
|
||||
LEFT JOIN metrics USING (metric_id)
|
||||
WHERE alerts.project_id =%(project_id)s
|
||||
AND alerts.deleted_at ISNULL
|
||||
ORDER BY alerts.created_at;""",
|
||||
{"project_id": project_id})
|
||||
cur.execute(query=query)
|
||||
all = helper.list_to_camel_case(cur.fetchall())
|
||||
for i in range(len(all)):
|
||||
all[i] = helper.custom_alert_to_front(__process_circular(all[i]))
|
||||
return all
|
||||
|
||||
|
||||
def __process_circular(alert):
|
||||
if alert is None:
|
||||
return None
|
||||
alert.pop("deletedAt")
|
||||
alert["createdAt"] = TimeUTC.datetime_to_timestamp(alert["createdAt"])
|
||||
return alert
|
||||
|
||||
|
||||
def create(project_id, data: schemas.AlertSchema):
|
||||
data = data.dict()
|
||||
data["query"] = json.dumps(data["query"])
|
||||
data["options"] = json.dumps(data["options"])
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id, change)
|
||||
VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s, %(change)s)
|
||||
RETURNING *;""",
|
||||
{"project_id": project_id, **data})
|
||||
)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return {"data": helper.custom_alert_to_front(helper.dict_to_camel_case(__process_circular(a)))}
|
||||
|
||||
|
||||
def update(id, data: schemas.AlertSchema):
|
||||
data = data.dict()
|
||||
data["query"] = json.dumps(data["query"])
|
||||
data["options"] = json.dumps(data["options"])
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""\
|
||||
UPDATE public.alerts
|
||||
SET name = %(name)s,
|
||||
description = %(description)s,
|
||||
active = TRUE,
|
||||
detection_method = %(detection_method)s,
|
||||
query = %(query)s,
|
||||
options = %(options)s,
|
||||
series_id = %(series_id)s,
|
||||
change = %(change)s
|
||||
WHERE alert_id =%(id)s AND deleted_at ISNULL
|
||||
RETURNING *;""",
|
||||
{"id": id, **data})
|
||||
cur.execute(query=query)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return {"data": helper.custom_alert_to_front(__process_circular(a))}
|
||||
|
||||
|
||||
def process_notifications(data):
|
||||
full = {}
|
||||
for n in data:
|
||||
if "message" in n["options"]:
|
||||
webhook_data = {}
|
||||
if "data" in n["options"]:
|
||||
webhook_data = n["options"].pop("data")
|
||||
for c in n["options"].pop("message"):
|
||||
if c["type"] not in full:
|
||||
full[c["type"]] = []
|
||||
if c["type"] in ["slack", "msteams", "email"]:
|
||||
full[c["type"]].append({
|
||||
"notification": n,
|
||||
"destination": c["value"]
|
||||
})
|
||||
elif c["type"] in ["webhook"]:
|
||||
full[c["type"]].append({"data": webhook_data, "destination": c["value"]})
|
||||
notifications.create(data)
|
||||
BATCH_SIZE = 200
|
||||
for t in full.keys():
|
||||
for i in range(0, len(full[t]), BATCH_SIZE):
|
||||
notifications_list = full[t][i:i + BATCH_SIZE]
|
||||
if notifications_list is None or len(notifications_list) == 0:
|
||||
break
|
||||
|
||||
if t == "slack":
|
||||
try:
|
||||
send_to_slack_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logging.error("!!!Error while sending slack notifications batch")
|
||||
logging.error(str(e))
|
||||
elif t == "msteams":
|
||||
try:
|
||||
send_to_msteams_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logging.error("!!!Error while sending msteams notifications batch")
|
||||
logging.error(str(e))
|
||||
elif t == "email":
|
||||
try:
|
||||
send_by_email_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logging.error("!!!Error while sending email notifications batch")
|
||||
logging.error(str(e))
|
||||
elif t == "webhook":
|
||||
try:
|
||||
webhook.trigger_batch(data_list=notifications_list)
|
||||
except Exception as e:
|
||||
logging.error("!!!Error while sending webhook notifications batch")
|
||||
logging.error(str(e))
|
||||
|
||||
|
||||
def send_by_email(notification, destination):
|
||||
if notification is None:
|
||||
return
|
||||
email_helper.alert_email(recipients=destination,
|
||||
subject=f'"{notification["title"]}" has been triggered',
|
||||
data={
|
||||
"message": f'"{notification["title"]}" {notification["description"]}',
|
||||
"project_id": notification["options"]["projectId"]})
|
||||
|
||||
|
||||
def send_by_email_batch(notifications_list):
|
||||
if not helper.has_smtp():
|
||||
logging.info("no SMTP configuration for email notifications")
|
||||
if notifications_list is None or len(notifications_list) == 0:
|
||||
logging.info("no email notifications")
|
||||
return
|
||||
for n in notifications_list:
|
||||
send_by_email(notification=n.get("notification"), destination=n.get("destination"))
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def send_to_slack_batch(notifications_list):
|
||||
webhookId_map = {}
|
||||
for n in notifications_list:
|
||||
if n.get("destination") not in webhookId_map:
|
||||
webhookId_map[n.get("destination")] = {"tenantId": n["notification"]["tenantId"], "batch": []}
|
||||
webhookId_map[n.get("destination")]["batch"].append({"text": n["notification"]["description"] \
|
||||
+ f"\n<{config('SITE_URL')}{n['notification']['buttonUrl']}|{n['notification']['buttonText']}>",
|
||||
"title": n["notification"]["title"],
|
||||
"title_link": n["notification"]["buttonUrl"],
|
||||
"ts": datetime.now().timestamp()})
|
||||
for batch in webhookId_map.keys():
|
||||
Slack.send_batch(tenant_id=webhookId_map[batch]["tenantId"], webhook_id=batch,
|
||||
attachments=webhookId_map[batch]["batch"])
|
||||
|
||||
|
||||
def send_to_msteams_batch(notifications_list):
|
||||
webhookId_map = {}
|
||||
for n in notifications_list:
|
||||
if n.get("destination") not in webhookId_map:
|
||||
webhookId_map[n.get("destination")] = {"tenantId": n["notification"]["tenantId"], "batch": []}
|
||||
|
||||
link = f"[{n['notification']['buttonText']}]({config('SITE_URL')}{n['notification']['buttonUrl']})"
|
||||
webhookId_map[n.get("destination")]["batch"].append({"type": "ColumnSet",
|
||||
"style": "emphasis",
|
||||
"separator": True,
|
||||
"bleed": True,
|
||||
"columns": [{
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{"type": "TextBlock",
|
||||
"text": n["notification"]["title"],
|
||||
"style": "heading",
|
||||
"size": "Large"},
|
||||
{"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": n["notification"]["description"],
|
||||
"wrap": True},
|
||||
{"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": link}
|
||||
]
|
||||
}]})
|
||||
for batch in webhookId_map.keys():
|
||||
MSTeams.send_batch(tenant_id=webhookId_map[batch]["tenantId"], webhook_id=batch,
|
||||
attachments=webhookId_map[batch]["batch"])
|
||||
|
||||
|
||||
def delete(project_id, alert_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(""" UPDATE public.alerts
|
||||
SET deleted_at = timezone('utc'::text, now()),
|
||||
active = FALSE
|
||||
WHERE alert_id = %(alert_id)s AND project_id=%(project_id)s;""",
|
||||
{"alert_id": alert_id, "project_id": project_id})
|
||||
)
|
||||
return {"data": {"state": "success"}}
|
||||
|
||||
|
||||
def get_predefined_values():
|
||||
values = [e.value for e in schemas.AlertColumn]
|
||||
values = [{"name": v, "value": v,
|
||||
"unit": "count" if v.endswith(".count") else "ms",
|
||||
"predefined": True,
|
||||
"metricId": None,
|
||||
"seriesId": None} for v in values if v != schemas.AlertColumn.custom]
|
||||
return values
|
||||
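`process_notifications` above fans the triggered alerts out per channel (slack, msteams, email, webhook) and sends each channel's messages in slices of `BATCH_SIZE = 200`. A small standalone sketch of that slicing:

```python
from typing import Iterator, List

BATCH_SIZE = 200


def batches(items: List[dict], size: int = BATCH_SIZE) -> Iterator[List[dict]]:
    # Yield consecutive slices, matching the full[t][i:i + BATCH_SIZE] loop above.
    for i in range(0, len(items), size):
        yield items[i:i + size]


# Example: 450 queued notifications are delivered as batches of 200, 200 and 50.
assert [len(b) for b in batches([{} for _ in range(450)])] == [200, 200, 50]
```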
@@ -1,10 +0,0 @@
import logging
|
||||
|
||||
from decouple import config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
if config("EXP_ALERTS", cast=bool, default=False):
|
||||
logging.info(">>> Using experimental alerts")
|
||||
from . import alerts_processor_ch as alerts_processor
|
||||
else:
|
||||
from . import alerts_processor as alerts_processor
|
||||
@@ -1,235 +0,0 @@
import json
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import notifications, webhook
|
||||
from chalicelib.core.collaborations.collaboration_msteams import MSTeams
|
||||
from chalicelib.core.collaborations.collaboration_slack import Slack
|
||||
from chalicelib.utils import pg_client, helper, email_helper, smtp
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get(id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT *
|
||||
FROM public.alerts
|
||||
WHERE alert_id =%(id)s;""",
|
||||
{"id": id})
|
||||
)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return helper.custom_alert_to_front(__process_circular(a))
|
||||
|
||||
|
||||
def get_all(project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""\
|
||||
SELECT alerts.*,
|
||||
COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
|
||||
query ->> 'left') AS series_name
|
||||
FROM public.alerts
|
||||
LEFT JOIN metric_series USING (series_id)
|
||||
LEFT JOIN metrics USING (metric_id)
|
||||
WHERE alerts.project_id =%(project_id)s
|
||||
AND alerts.deleted_at ISNULL
|
||||
ORDER BY alerts.created_at;""",
|
||||
{"project_id": project_id})
|
||||
cur.execute(query=query)
|
||||
all = helper.list_to_camel_case(cur.fetchall())
|
||||
for i in range(len(all)):
|
||||
all[i] = helper.custom_alert_to_front(__process_circular(all[i]))
|
||||
return all
|
||||
|
||||
|
||||
def __process_circular(alert):
|
||||
if alert is None:
|
||||
return None
|
||||
alert.pop("deletedAt")
|
||||
alert["createdAt"] = TimeUTC.datetime_to_timestamp(alert["createdAt"])
|
||||
return alert
|
||||
|
||||
|
||||
def create(project_id, data: schemas.AlertSchema):
|
||||
data = data.model_dump()
|
||||
data["query"] = json.dumps(data["query"])
|
||||
data["options"] = json.dumps(data["options"])
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id, change)
|
||||
VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s, %(change)s)
|
||||
RETURNING *;""",
|
||||
{"project_id": project_id, **data})
|
||||
)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return {"data": helper.custom_alert_to_front(helper.dict_to_camel_case(__process_circular(a)))}
|
||||
|
||||
|
||||
def update(id, data: schemas.AlertSchema):
|
||||
data = data.model_dump()
|
||||
data["query"] = json.dumps(data["query"])
|
||||
data["options"] = json.dumps(data["options"])
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""\
|
||||
UPDATE public.alerts
|
||||
SET name = %(name)s,
|
||||
description = %(description)s,
|
||||
active = TRUE,
|
||||
detection_method = %(detection_method)s,
|
||||
query = %(query)s,
|
||||
options = %(options)s,
|
||||
series_id = %(series_id)s,
|
||||
change = %(change)s
|
||||
WHERE alert_id =%(id)s AND deleted_at ISNULL
|
||||
RETURNING *;""",
|
||||
{"id": id, **data})
|
||||
cur.execute(query=query)
|
||||
a = helper.dict_to_camel_case(cur.fetchone())
|
||||
return {"data": helper.custom_alert_to_front(__process_circular(a))}
|
||||
|
||||
|
||||
def process_notifications(data):
|
||||
full = {}
|
||||
for n in data:
|
||||
if "message" in n["options"]:
|
||||
webhook_data = {}
|
||||
if "data" in n["options"]:
|
||||
webhook_data = n["options"].pop("data")
|
||||
for c in n["options"].pop("message"):
|
||||
if c["type"] not in full:
|
||||
full[c["type"]] = []
|
||||
if c["type"] in ["slack", "msteams", "email"]:
|
||||
full[c["type"]].append({
|
||||
"notification": n,
|
||||
"destination": c["value"]
|
||||
})
|
||||
elif c["type"] in ["webhook"]:
|
||||
full[c["type"]].append({"data": webhook_data, "destination": c["value"]})
|
||||
notifications.create(data)
|
||||
BATCH_SIZE = 200
|
||||
for t in full.keys():
|
||||
for i in range(0, len(full[t]), BATCH_SIZE):
|
||||
notifications_list = full[t][i:min(i + BATCH_SIZE, len(full[t]))]
|
||||
if notifications_list is None or len(notifications_list) == 0:
|
||||
break
|
||||
|
||||
if t == "slack":
|
||||
try:
|
||||
send_to_slack_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logger.error("!!!Error while sending slack notifications batch")
|
||||
logger.error(str(e))
|
||||
elif t == "msteams":
|
||||
try:
|
||||
send_to_msteams_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logger.error("!!!Error while sending msteams notifications batch")
|
||||
logger.error(str(e))
|
||||
elif t == "email":
|
||||
try:
|
||||
send_by_email_batch(notifications_list=notifications_list)
|
||||
except Exception as e:
|
||||
logger.error("!!!Error while sending email notifications batch")
|
||||
logger.error(str(e))
|
||||
elif t == "webhook":
|
||||
try:
|
||||
webhook.trigger_batch(data_list=notifications_list)
|
||||
except Exception as e:
|
||||
logger.error("!!!Error while sending webhook notifications batch")
|
||||
logger.error(str(e))
|
||||
|
||||
|
||||
def send_by_email(notification, destination):
|
||||
if notification is None:
|
||||
return
|
||||
email_helper.alert_email(recipients=destination,
|
||||
subject=f'"{notification["title"]}" has been triggered',
|
||||
data={
|
||||
"message": f'"{notification["title"]}" {notification["description"]}',
|
||||
"project_id": notification["options"]["projectId"]})
|
||||
|
||||
|
||||
def send_by_email_batch(notifications_list):
|
||||
if not smtp.has_smtp():
|
||||
logger.info("no SMTP configuration for email notifications")
|
||||
if notifications_list is None or len(notifications_list) == 0:
|
||||
logger.info("no email notifications")
|
||||
return
|
||||
for n in notifications_list:
|
||||
send_by_email(notification=n.get("notification"), destination=n.get("destination"))
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def send_to_slack_batch(notifications_list):
|
||||
webhookId_map = {}
|
||||
for n in notifications_list:
|
||||
if n.get("destination") not in webhookId_map:
|
||||
webhookId_map[n.get("destination")] = {"tenantId": n["notification"]["tenantId"], "batch": []}
|
||||
webhookId_map[n.get("destination")]["batch"].append({"text": n["notification"]["description"] \
|
||||
+ f"\n<{config('SITE_URL')}{n['notification']['buttonUrl']}|{n['notification']['buttonText']}>",
|
||||
"title": n["notification"]["title"],
|
||||
"title_link": n["notification"]["buttonUrl"],
|
||||
"ts": datetime.now().timestamp()})
|
||||
for batch in webhookId_map.keys():
|
||||
Slack.send_batch(tenant_id=webhookId_map[batch]["tenantId"], webhook_id=batch,
|
||||
attachments=webhookId_map[batch]["batch"])
|
||||
|
||||
|
||||
def send_to_msteams_batch(notifications_list):
|
||||
webhookId_map = {}
|
||||
for n in notifications_list:
|
||||
if n.get("destination") not in webhookId_map:
|
||||
webhookId_map[n.get("destination")] = {"tenantId": n["notification"]["tenantId"], "batch": []}
|
||||
|
||||
link = f"{config('SITE_URL')}{n['notification']['buttonUrl']}"
|
||||
# for MSTeams, the batch is the list of `sections`
|
||||
webhookId_map[n.get("destination")]["batch"].append(
|
||||
{
|
||||
"activityTitle": n["notification"]["title"],
|
||||
"activitySubtitle": f"On Project *{n['notification']['projectName']}*",
|
||||
"facts": [
|
||||
{
|
||||
"name": "Target:",
|
||||
"value": link
|
||||
},
|
||||
{
|
||||
"name": "Description:",
|
||||
"value": n["notification"]["description"]
|
||||
}],
|
||||
"markdown": True
|
||||
}
|
||||
)
|
||||
for batch in webhookId_map.keys():
|
||||
MSTeams.send_batch(tenant_id=webhookId_map[batch]["tenantId"], webhook_id=batch,
|
||||
attachments=webhookId_map[batch]["batch"])
|
||||
|
||||
|
||||
def delete(project_id, alert_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(""" UPDATE public.alerts
|
||||
SET deleted_at = timezone('utc'::text, now()),
|
||||
active = FALSE
|
||||
WHERE alert_id = %(alert_id)s AND project_id=%(project_id)s;""",
|
||||
{"alert_id": alert_id, "project_id": project_id})
|
||||
)
|
||||
return {"data": {"state": "success"}}
|
||||
|
||||
|
||||
def get_predefined_values():
|
||||
values = [e.value for e in schemas.AlertColumn]
|
||||
values = [{"name": v, "value": v,
|
||||
"unit": "count" if v.endswith(".count") else "ms",
|
||||
"predefined": True,
|
||||
"metricId": None,
|
||||
"seriesId": None} for v in values if v != schemas.AlertColumn.CUSTOM]
|
||||
return values
|
||||
@@ -1,33 +0,0 @@
from chalicelib.core.alerts.modules import TENANT_ID
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
||||
|
||||
def get_all_alerts():
|
||||
with pg_client.PostgresClient(long_query=True) as cur:
|
||||
query = f"""SELECT {TENANT_ID} AS tenant_id,
|
||||
alert_id,
|
||||
projects.project_id,
|
||||
projects.name AS project_name,
|
||||
detection_method,
|
||||
query,
|
||||
options,
|
||||
(EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
|
||||
alerts.name,
|
||||
alerts.series_id,
|
||||
filter,
|
||||
change,
|
||||
COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
|
||||
query ->> 'left') AS series_name
|
||||
FROM public.alerts
|
||||
INNER JOIN projects USING (project_id)
|
||||
LEFT JOIN metric_series USING (series_id)
|
||||
LEFT JOIN metrics USING (metric_id)
|
||||
WHERE alerts.deleted_at ISNULL
|
||||
AND alerts.active
|
||||
AND projects.active
|
||||
AND projects.deleted_at ISNULL
|
||||
AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
|
||||
ORDER BY alerts.created_at;"""
|
||||
cur.execute(query=query)
|
||||
all_alerts = helper.list_to_camel_case(cur.fetchall())
|
||||
return all_alerts
|
||||
@@ -1,169 +0,0 @@
import logging
|
||||
|
||||
from pydantic_core._pydantic_core import ValidationError
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import alert_helpers
|
||||
from chalicelib.core.sessions import sessions_pg as sessions
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
LeftToDb = {
|
||||
schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
|
||||
schemas.AlertColumn.PERFORMANCE__FIRST_MEANINGFUL_PAINT__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
|
||||
schemas.AlertColumn.PERFORMANCE__PAGE_LOAD_TIME__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__DOM_BUILD_TIME__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(dom_building_time,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__SPEED_INDEX__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__PAGE_RESPONSE_TIME__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(response_time,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__TTFB__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(first_paint_time,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(visually_complete,0))"},
|
||||
schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
|
||||
"table": "public.sessions",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "errors_count > 0 AND duration>0"},
|
||||
schemas.AlertColumn.ERRORS__JAVASCRIPT__COUNT: {
|
||||
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
|
||||
schemas.AlertColumn.ERRORS__BACKEND__COUNT: {
|
||||
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
|
||||
}
|
||||
|
||||
|
||||
def Build(a):
|
||||
now = TimeUTC.now()
|
||||
params = {"project_id": a["projectId"], "now": now}
|
||||
full_args = {}
|
||||
j_s = True
|
||||
main_table = ""
|
||||
if a["seriesId"] is not None:
|
||||
a["filter"]["sort"] = "session_id"
|
||||
a["filter"]["order"] = schemas.SortOrderType.DESC
|
||||
a["filter"]["startDate"] = 0
|
||||
a["filter"]["endDate"] = TimeUTC.now()
|
||||
try:
|
||||
data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
|
||||
except ValidationError:
|
||||
logger.warning("Validation error for:")
|
||||
logger.warning(a["filter"])
|
||||
raise
|
||||
|
||||
full_args, query_part = sessions.search_query_parts(data=data, error_status=None, errors_only=False,
|
||||
issue=None, project_id=a["projectId"], user_id=None,
|
||||
favorite_only=False)
|
||||
subQ = f"""SELECT COUNT(session_id) AS value
|
||||
{query_part}"""
|
||||
else:
|
||||
colDef = LeftToDb[a["query"]["left"]]
|
||||
subQ = f"""SELECT {colDef["formula"]} AS value
|
||||
FROM {colDef["table"]}
|
||||
WHERE project_id = %(project_id)s
|
||||
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
|
||||
j_s = colDef.get("joinSessions", True)
|
||||
main_table = colDef["table"]
|
||||
is_ss = main_table == "public.sessions"
|
||||
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
|
||||
|
||||
if a["detectionMethod"] == schemas.AlertDetectionMethod.THRESHOLD:
|
||||
if a["seriesId"] is not None:
|
||||
q += f""" FROM ({subQ}) AS stat"""
|
||||
else:
|
||||
q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
|
||||
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
if a["change"] == schemas.AlertDetectionType.CHANGE:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
|
||||
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
|
||||
else:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
|
||||
* 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
|
||||
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
|
||||
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
|
||||
return q, params
|
||||
|
||||
|
||||
def process():
|
||||
logger.info("> processing alerts on PG")
|
||||
notifications = []
|
||||
all_alerts = alerts_listener.get_all_alerts()
|
||||
with pg_client.PostgresClient() as cur:
|
||||
for alert in all_alerts:
|
||||
if alert_helpers.can_check(alert):
|
||||
query, params = Build(alert)
|
||||
try:
|
||||
query = cur.mogrify(query, params)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
|
||||
logger.error(e)
|
||||
continue
|
||||
logger.debug(alert)
|
||||
logger.debug(query)
|
||||
try:
|
||||
cur.execute(query)
|
||||
result = cur.fetchone()
|
||||
if result["valid"]:
|
||||
logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
|
||||
notifications.append(alert_helpers.generate_notification(alert, result))
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
|
||||
logger.error(query)
|
||||
logger.error(e)
|
||||
cur = cur.recreate(rollback=True)
|
||||
if len(notifications) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""UPDATE public.alerts
|
||||
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
|
||||
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
|
||||
if len(notifications) > 0:
|
||||
alerts.process_notifications(notifications)
|
||||
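The removed PostgreSQL processor above assembles three kinds of checks in `Build`: a plain threshold over the current window, an absolute change between the current and previous windows, and a percentage change. A hedged sketch of the percentage formula, including the `NULLIF(previous, 0)` guard that avoids division by zero:

```python
from typing import Optional


def percent_change(current: float, previous: float) -> Optional[float]:
    # Mirrors ((current)/NULLIF((previous),0)-1)*100 from the generated SQL:
    # a zero previous value yields None instead of a division error.
    if previous == 0:
        return None
    return (current / previous - 1) * 100


assert percent_change(150, 100) == 50.0
assert percent_change(42, 0) is None
```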
@@ -1,195 +0,0 @@
import logging
|
||||
|
||||
from pydantic_core._pydantic_core import ValidationError
|
||||
|
||||
import schemas
|
||||
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import alert_helpers
|
||||
from chalicelib.core.sessions import sessions_ch as sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
LeftToDb = {
|
||||
schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "COALESCE(AVG(NULLIF(dom_content_loaded_event_time ,0)),0)",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__FIRST_MEANINGFUL_PAINT__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__PAGE_LOAD_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(load_event_time ,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__DOM_BUILD_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(dom_building_time,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__SPEED_INDEX__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(speed_index,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__PAGE_RESPONSE_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(response_time,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__TTFB__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(first_contentful_paint_time,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
|
||||
"formula": "AVG(NULLIF(visually_complete,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_sessions_table(timestamp)} AS sessions",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "duration>0 AND errors_count>0"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__JAVASCRIPT__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
|
||||
"eventType": "ERROR",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "source='js_exception'"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__BACKEND__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
|
||||
"eventType": "ERROR",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "source!='js_exception'"
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def Build(a):
|
||||
now = TimeUTC.now()
|
||||
params = {"project_id": a["projectId"], "now": now}
|
||||
full_args = {}
|
||||
if a["seriesId"] is not None:
|
||||
a["filter"]["sort"] = "session_id"
|
||||
a["filter"]["order"] = schemas.SortOrderType.DESC
|
||||
a["filter"]["startDate"] = 0
|
||||
a["filter"]["endDate"] = TimeUTC.now()
|
||||
try:
|
||||
data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
|
||||
except ValidationError:
|
||||
logger.warning("Validation error for:")
|
||||
logger.warning(a["filter"])
|
||||
raise
|
||||
|
||||
full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
|
||||
issue=None, project_id=a["projectId"], user_id=None,
|
||||
favorite_only=False)
|
||||
subQ = f"""SELECT COUNT(session_id) AS value
|
||||
{query_part}"""
|
||||
else:
|
||||
colDef = LeftToDb[a["query"]["left"]]
|
||||
params["event_type"] = LeftToDb[a["query"]["left"]].get("eventType")
|
||||
subQ = f"""SELECT {colDef["formula"]} AS value
|
||||
FROM {colDef["table"](now)}
|
||||
WHERE project_id = %(project_id)s
|
||||
{"AND event_type=%(event_type)s" if params["event_type"] else ""}
|
||||
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
|
||||
|
||||
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
|
||||
|
||||
if a["detectionMethod"] == schemas.AlertDetectionMethod.THRESHOLD:
|
||||
if a["seriesId"] is not None:
|
||||
q += f""" FROM ({subQ}) AS stat"""
|
||||
else:
|
||||
q += f""" FROM ({subQ}
|
||||
AND datetime>=toDateTime(%(startDate)s/1000)
|
||||
AND datetime<=toDateTime(%(now)s/1000) ) AS stat"""
|
||||
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
if a["change"] == schemas.AlertDetectionType.CHANGE:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
|
||||
AND datetime<=toDateTime(%(now)s/1000)"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
|
||||
AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
|
||||
else:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
|
||||
* 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
|
||||
AND datetime<=toDateTime(%(now)s/1000)"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
|
||||
AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
|
||||
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
|
||||
return q, params
|
||||
|
||||
|
||||
def process():
|
||||
logger.info("> processing alerts on CH")
|
||||
notifications = []
|
||||
all_alerts = alerts_listener.get_all_alerts()
|
||||
with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
|
||||
for alert in all_alerts:
|
||||
if alert["query"]["left"] != "CUSTOM":
|
||||
continue
|
||||
if alert_helpers.can_check(alert):
|
||||
query, params = Build(alert)
|
||||
try:
|
||||
query = ch_cur.format(query=query, parameters=params)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
|
||||
logger.error(e)
|
||||
continue
|
||||
logger.debug(alert)
|
||||
logger.debug(query)
|
||||
try:
|
||||
result = ch_cur.execute(query=query)
|
||||
if len(result) > 0:
|
||||
result = result[0]
|
||||
|
||||
if result["valid"]:
|
||||
logger.info("Valid alert, notifying users")
|
||||
notifications.append(alert_helpers.generate_notification(alert, result))
|
||||
except Exception as e:
|
||||
logger.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
|
||||
logger.error(str(e))
|
||||
logger.error(query)
|
||||
if len(notifications) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""UPDATE public.alerts
|
||||
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
|
||||
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
|
||||
if len(notifications) > 0:
|
||||
alerts.process_notifications(notifications)
|
||||
|
|
@ -1,3 +0,0 @@
TENANT_ID = "-1"

from . import helpers as alert_helpers
@ -1,74 +0,0 @@
import decimal
import logging

import schemas
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)
# This is the frequency of execution for each threshold
TimeInterval = {
    15: 3,
    30: 5,
    60: 10,
    120: 20,
    240: 30,
    1440: 60,
}


def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"


def can_check(a) -> bool:
    now = TimeUTC.now()

    repetitionBase = a["options"]["currentPeriod"] \
        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
        else a["options"]["previousPeriod"]

    if TimeInterval.get(repetitionBase) is None:
        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
        return False

    return (a["options"]["renotifyInterval"] <= 0 or
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


def generate_notification(alert, result):
    left = __format_value(result['value'])
    right = __format_value(alert['query']['right'])
    return {
        "alertId": alert["alertId"],
        "tenantId": alert["tenantId"],
        "title": alert["name"],
        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
        "buttonText": "Check metrics for more details",
        "buttonUrl": f"/{alert['projectId']}/metrics",
        "imageUrl": None,
        "projectId": alert["projectId"],
        "projectName": alert["projectName"],
        "options": {"source": "ALERT", "sourceId": alert["alertId"],
                    "sourceMeta": alert["detectionMethod"],
                    "message": alert["options"]["message"], "projectId": alert["projectId"],
                    "data": {"title": alert["name"],
                             "limitValue": alert["query"]["right"],
                             "actualValue": float(result["value"]) \
                                 if isinstance(result["value"], decimal.Decimal) \
                                 else result["value"],
                             "operator": alert["query"]["operator"],
                             "trigger": alert["query"]["left"],
                             "alertId": alert["alertId"],
                             "detectionMethod": alert["detectionMethod"],
                             "currentPeriod": alert["options"]["currentPeriod"],
                             "previousPeriod": alert["options"]["previousPeriod"],
                             "createdAt": TimeUTC.now()}},
    }
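Note: the deleted can_check helper above gates alert evaluation on two conditions: the re-notification window (renotifyInterval, in minutes) must have elapsed since lastNotification, and the alert's age must land within one minute of a multiple of its check frequency, which is looked up in TimeInterval from the longer of the two periods. A minimal sketch of that scheduling arithmetic follows; the timestamps are invented for illustration and are not part of this diff.

# Illustrative only: how the modulo-based gate in can_check behaves.
# TimeUTC.now() returns epoch milliseconds in the real code; values here are made up.
MINUTE_MS = 60 * 1000
TimeInterval = {15: 3, 30: 5, 60: 10, 120: 20, 240: 30, 1440: 60}

created_at = 1_700_000_000_000           # hypothetical alert creation time (ms)
now = created_at + 45 * MINUTE_MS        # 45 minutes later
check_every = TimeInterval[15]           # a 15-minute period is re-checked every 3 minutes

# The alert is due when its age is within one minute of a multiple of the check frequency.
due = ((now - created_at) % (check_every * MINUTE_MS)) < MINUTE_MS
print(due)  # True: 45 minutes is an exact multiple of 3 minutes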
31 api/chalicelib/core/alerts_listener.py Normal file
@ -0,0 +1,31 @@
from chalicelib.utils import pg_client, helper


def get_all_alerts():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = """SELECT -1 AS tenant_id,
                          alert_id,
                          projects.project_id,
                          detection_method,
                          query,
                          options,
                          (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
                          alerts.name,
                          alerts.series_id,
                          filter,
                          change,
                          COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
                                   query ->> 'left') AS series_name
                   FROM public.alerts
                            INNER JOIN projects USING (project_id)
                            LEFT JOIN metric_series USING (series_id)
                            LEFT JOIN metrics USING (metric_id)
                   WHERE alerts.deleted_at ISNULL
                     AND alerts.active
                     AND projects.active
                     AND projects.deleted_at ISNULL
                     AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
                   ORDER BY alerts.created_at;"""
        cur.execute(query=query)
        all_alerts = helper.list_to_camel_case(cur.fetchall())
        return all_alerts
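Note: get_all_alerts returns one camel-cased dict per active alert, joined with its project and, when the alert is bound to a metric series, the composed series_name; the process() loops elsewhere in this diff iterate over that list. A rough sketch of the shape of one returned row for a series-less threshold alert; the keys follow the SELECT list above, but every value below is invented for illustration.

# Hypothetical example of a single element of get_all_alerts(); keys mirror the SELECT list
# after list_to_camel_case, values are illustrative only.
example_alert = {
    "tenantId": -1,
    "alertId": 12,
    "projectId": 3,
    "detectionMethod": "threshold",
    "query": {"left": "performance.page_load_time.average", "operator": ">=", "right": 3000},
    "options": {"currentPeriod": 15, "previousPeriod": 15, "renotifyInterval": 120, "message": []},
    "createdAt": 1_700_000_000_000,
    "name": "Slow page load",
    "seriesId": None,
    "filter": None,
    "change": "change",
    "seriesName": "performance.page_load_time.average",
}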
251 api/chalicelib/core/alerts_processor.py Normal file
@ -0,0 +1,251 @@
|
|||
import decimal
|
||||
import logging
|
||||
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import alerts_listener
|
||||
from chalicelib.core import sessions, alerts
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
|
||||
|
||||
LeftToDb = {
|
||||
schemas.AlertColumn.performance__dom_content_loaded__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
|
||||
schemas.AlertColumn.performance__first_meaningful_paint__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
|
||||
schemas.AlertColumn.performance__page_load_time__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"},
|
||||
schemas.AlertColumn.performance__dom_build_time__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(dom_building_time,0))"},
|
||||
schemas.AlertColumn.performance__speed_index__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"},
|
||||
schemas.AlertColumn.performance__page_response_time__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(response_time,0))"},
|
||||
schemas.AlertColumn.performance__ttfb__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(first_paint_time,0))"},
|
||||
schemas.AlertColumn.performance__time_to_render__average: {
|
||||
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(visually_complete,0))"},
|
||||
schemas.AlertColumn.performance__image_load_time__average: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"},
|
||||
schemas.AlertColumn.performance__request_load_time__average: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"},
|
||||
schemas.AlertColumn.resources__load_time__average: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))"},
|
||||
schemas.AlertColumn.resources__missing__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE AND type='img'"},
|
||||
schemas.AlertColumn.errors__4xx_5xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
|
||||
"condition": "status/100!=2"},
|
||||
schemas.AlertColumn.errors__4xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=4"},
|
||||
schemas.AlertColumn.errors__5xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=5"},
|
||||
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
|
||||
schemas.AlertColumn.performance__crashes__count: {
|
||||
"table": "public.sessions",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "errors_count > 0 AND duration>0"},
|
||||
schemas.AlertColumn.errors__javascript__count: {
|
||||
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
|
||||
schemas.AlertColumn.errors__backend__count: {
|
||||
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
|
||||
}
|
||||
|
||||
# This is the frequency of execution for each threshold
|
||||
TimeInterval = {
|
||||
15: 3,
|
||||
30: 5,
|
||||
60: 10,
|
||||
120: 20,
|
||||
240: 30,
|
||||
1440: 60,
|
||||
}
|
||||
|
||||
|
||||
def can_check(a) -> bool:
|
||||
now = TimeUTC.now()
|
||||
|
||||
repetitionBase = a["options"]["currentPeriod"] \
|
||||
if a["detectionMethod"] == schemas.AlertDetectionMethod.change \
|
||||
and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
|
||||
else a["options"]["previousPeriod"]
|
||||
|
||||
if TimeInterval.get(repetitionBase) is None:
|
||||
logging.error(f"repetitionBase: {repetitionBase} NOT FOUND")
|
||||
return False
|
||||
|
||||
return (a["options"]["renotifyInterval"] <= 0 or
|
||||
a["options"].get("lastNotification") is None or
|
||||
a["options"]["lastNotification"] <= 0 or
|
||||
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
|
||||
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
|
||||
|
||||
|
||||
def Build(a):
|
||||
now = TimeUTC.now()
|
||||
params = {"project_id": a["projectId"], "now": now}
|
||||
full_args = {}
|
||||
j_s = True
|
||||
main_table = ""
|
||||
if a["seriesId"] is not None:
|
||||
a["filter"]["sort"] = "session_id"
|
||||
a["filter"]["order"] = schemas.SortOrderType.desc
|
||||
a["filter"]["startDate"] = -1
|
||||
a["filter"]["endDate"] = TimeUTC.now()
|
||||
full_args, query_part = sessions.search_query_parts(
|
||||
data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), error_status=None, errors_only=False,
|
||||
issue=None, project_id=a["projectId"], user_id=None, favorite_only=False)
|
||||
subQ = f"""SELECT COUNT(session_id) AS value
|
||||
{query_part}"""
|
||||
else:
|
||||
colDef = LeftToDb[a["query"]["left"]]
|
||||
subQ = f"""SELECT {colDef["formula"]} AS value
|
||||
FROM {colDef["table"]}
|
||||
WHERE project_id = %(project_id)s
|
||||
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
|
||||
j_s = colDef.get("joinSessions", True)
|
||||
main_table = colDef["table"]
|
||||
is_ss = main_table == "public.sessions"
|
||||
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
|
||||
|
||||
if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
|
||||
if a["seriesId"] is not None:
|
||||
q += f""" FROM ({subQ}) AS stat"""
|
||||
else:
|
||||
q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
|
||||
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
if a["change"] == schemas.AlertDetectionType.change:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
|
||||
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
|
||||
q += f" FROM ( {sub1} ) AS stat"
|
||||
|
||||
else:
|
||||
if a["seriesId"] is not None:
|
||||
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
|
||||
sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
params = {**params, **full_args,
|
||||
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
|
||||
"timestamp_sub2": TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
|
||||
* 60 * 1000}
|
||||
else:
|
||||
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
|
||||
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
|
||||
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
|
||||
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
|
||||
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
|
||||
params["timestamp_sub2"] = TimeUTC.now() \
|
||||
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
|
||||
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
|
||||
q += f" FROM ({sub1}) AS stat"
|
||||
|
||||
return q, params
|
||||
|
||||
|
||||
def process():
|
||||
notifications = []
|
||||
all_alerts = alerts_listener.get_all_alerts()
|
||||
with pg_client.PostgresClient() as cur:
|
||||
for alert in all_alerts:
|
||||
if can_check(alert):
|
||||
query, params = Build(alert)
|
||||
try:
|
||||
query = cur.mogrify(query, params)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
|
||||
logging.error(e)
|
||||
continue
|
||||
logging.debug(alert)
|
||||
logging.debug(query)
|
||||
try:
|
||||
cur.execute(query)
|
||||
result = cur.fetchone()
|
||||
if result["valid"]:
|
||||
logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
|
||||
notifications.append(generate_notification(alert, result))
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
|
||||
logging.error(query)
|
||||
logging.error(e)
|
||||
cur = cur.recreate(rollback=True)
|
||||
if len(notifications) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""UPDATE public.alerts
|
||||
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
|
||||
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
|
||||
if len(notifications) > 0:
|
||||
alerts.process_notifications(notifications)
|
||||
|
||||
|
||||
def __format_value(x):
|
||||
if x % 1 == 0:
|
||||
x = int(x)
|
||||
else:
|
||||
x = round(x, 2)
|
||||
return f"{x:,}"
|
||||
|
||||
|
||||
def generate_notification(alert, result):
|
||||
left = __format_value(result['value'])
|
||||
right = __format_value(alert['query']['right'])
|
||||
return {
|
||||
"alertId": alert["alertId"],
|
||||
"tenantId": alert["tenantId"],
|
||||
"title": alert["name"],
|
||||
"description": f"has been triggered, {alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
|
||||
"buttonText": "Check metrics for more details",
|
||||
"buttonUrl": f"/{alert['projectId']}/metrics",
|
||||
"imageUrl": None,
|
||||
"options": {"source": "ALERT", "sourceId": alert["alertId"],
|
||||
"sourceMeta": alert["detectionMethod"],
|
||||
"message": alert["options"]["message"], "projectId": alert["projectId"],
|
||||
"data": {"title": alert["name"],
|
||||
"limitValue": alert["query"]["right"],
|
||||
"actualValue": float(result["value"]) \
|
||||
if isinstance(result["value"], decimal.Decimal) \
|
||||
else result["value"],
|
||||
"operator": alert["query"]["operator"],
|
||||
"trigger": alert["query"]["left"],
|
||||
"alertId": alert["alertId"],
|
||||
"detectionMethod": alert["detectionMethod"],
|
||||
"currentPeriod": alert["options"]["currentPeriod"],
|
||||
"previousPeriod": alert["options"]["previousPeriod"],
|
||||
"createdAt": TimeUTC.now()}},
|
||||
}
|
||||
|
|
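Note: in Build above, the non-threshold branches compare the current period with the one immediately before it: the change branch subtracts the two sub-query values, and the percent branch computes (current / previous - 1) * 100 with NULLIF guarding against a zero previous value. A small worked example of that arithmetic, detached from the generated SQL; the numbers are invented.

# Illustrative only: the change / percent formulas that Build embeds in its SQL.
current, previous = 480.0, 400.0

absolute_change = current - previous                                     # "change" detection: 80.0
percent_change = (current / previous - 1) * 100 if previous else None    # percent detection: 20.0

print(absolute_change, percent_change)  # 80.0 20.0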
@ -1,20 +1,32 @@
|
|||
import logging
|
||||
from os import access, R_OK
|
||||
from os.path import exists as path_exists, getsize
|
||||
|
||||
import jwt
|
||||
import requests
|
||||
from decouple import config
|
||||
from fastapi import HTTPException, status
|
||||
from starlette import status
|
||||
from fastapi import HTTPException
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import projects
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ASSIST_KEY = config("ASSIST_KEY")
|
||||
ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY
|
||||
SESSION_PROJECTION_COLS = """s.project_id,
|
||||
s.session_id::text AS session_id,
|
||||
s.user_uuid,
|
||||
s.user_id,
|
||||
s.user_agent,
|
||||
s.user_os,
|
||||
s.user_browser,
|
||||
s.user_device,
|
||||
s.user_device_type,
|
||||
s.user_country,
|
||||
s.start_ts,
|
||||
s.user_anonymous_id,
|
||||
s.platform
|
||||
"""
|
||||
|
||||
|
||||
def get_live_sessions_ws_user_id(project_id, user_id):
|
||||
|
|
@ -24,16 +36,6 @@ def get_live_sessions_ws_user_id(project_id, user_id):
|
|||
return __get_live_sessions_ws(project_id=project_id, data=data)
|
||||
|
||||
|
||||
def get_live_sessions_ws_test_id(project_id, test_id):
|
||||
data = {
|
||||
"filter": {
|
||||
'uxtId': test_id,
|
||||
'operator': 'is'
|
||||
}
|
||||
}
|
||||
return __get_live_sessions_ws(project_id=project_id, data=data)
|
||||
|
||||
|
||||
def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSchema):
|
||||
data = {
|
||||
"filter": {},
|
||||
|
|
@ -41,11 +43,11 @@ def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSche
|
|||
"sort": {"key": body.sort, "order": body.order}
|
||||
}
|
||||
for f in body.filters:
|
||||
if f.type == schemas.LiveFilterType.METADATA:
|
||||
if f.type == schemas.LiveFilterType.metadata:
|
||||
data["filter"][f.source] = {"values": f.value, "operator": f.operator}
|
||||
|
||||
else:
|
||||
data["filter"][f.type] = {"values": f.value, "operator": f.operator}
|
||||
data["filter"][f.type.value] = {"values": f.value, "operator": f.operator}
|
||||
return __get_live_sessions_ws(project_id=project_id, data=data)
|
||||
|
||||
|
||||
|
|
@ -55,21 +57,21 @@ def __get_live_sessions_ws(project_id, data):
|
|||
results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
|
||||
json=data, timeout=config("assistTimeout", cast=int, default=5))
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the peer-server code:{results.status_code}")
|
||||
print(results.text)
|
||||
return {"total": 0, "sessions": []}
|
||||
live_peers = results.json().get("data", [])
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error("!! Timeout getting Assist response")
|
||||
print("!! Timeout getting Assist response")
|
||||
live_peers = {"total": 0, "sessions": []}
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting Live-Assist response")
|
||||
logger.exception(e)
|
||||
logger.error("expected JSON, received:")
|
||||
print("!! Issue getting Live-Assist response")
|
||||
print(str(e))
|
||||
print("expected JSON, received:")
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
except:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
live_peers = {"total": 0, "sessions": []}
|
||||
_live_peers = live_peers
|
||||
if "sessions" in live_peers:
|
||||
|
|
@ -95,7 +97,7 @@ def __get_agent_token(project_id, project_key, session_id):
|
|||
"aud": f"openreplay:agent"
|
||||
},
|
||||
key=config("ASSIST_JWT_SECRET"),
|
||||
algorithm=config("JWT_ALGORITHM")
|
||||
algorithm=config("jwt_algorithm")
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -105,8 +107,8 @@ def get_live_session_by_id(project_id, session_id):
|
|||
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
|
||||
timeout=config("assistTimeout", cast=int, default=5))
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the peer-server code:{results.status_code}")
|
||||
print(results.text)
|
||||
return None
|
||||
results = results.json().get("data")
|
||||
if results is None:
|
||||
|
|
@ -114,16 +116,16 @@ def get_live_session_by_id(project_id, session_id):
|
|||
results["live"] = True
|
||||
results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error("!! Timeout getting Assist response")
|
||||
print("!! Timeout getting Assist response")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting Assist response")
|
||||
logger.exception(e)
|
||||
logger.error("expected JSON, received:")
|
||||
print("!! Issue getting Assist response")
|
||||
print(str(e))
|
||||
print("expected JSON, received:")
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
except:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
return None
|
||||
return results
|
||||
|
||||
|
|
@ -135,21 +137,21 @@ def is_live(project_id, session_id, project_key=None):
|
|||
results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
|
||||
timeout=config("assistTimeout", cast=int, default=5))
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the peer-server code:{results.status_code}")
|
||||
print(results.text)
|
||||
return False
|
||||
results = results.json().get("data")
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error("!! Timeout getting Assist response")
|
||||
print("!! Timeout getting Assist response")
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting Assist response")
|
||||
logger.exception(e)
|
||||
logger.error("expected JSON, received:")
|
||||
print("!! Issue getting Assist response")
|
||||
print(str(e))
|
||||
print("expected JSON, received:")
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
except:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
return False
|
||||
return str(session_id) == results
|
||||
|
||||
|
|
@ -164,27 +166,32 @@ def autocomplete(project_id, q: str, key: str = None):
|
|||
ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
|
||||
params=params, timeout=config("assistTimeout", cast=int, default=5))
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the peer-server code:{results.status_code}")
|
||||
print(results.text)
|
||||
return {"errors": [f"Something went wrong while calling assist:{results.text}"]}
|
||||
results = results.json().get("data", [])
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error("!! Timeout getting Assist response")
|
||||
print("!! Timeout getting Assist response")
|
||||
return {"errors": ["Assist request timeout"]}
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting Assist response")
|
||||
logger.exception(e)
|
||||
logger.error("expected JSON, received:")
|
||||
print("!! Issue getting Assist response")
|
||||
print(str(e))
|
||||
print("expected JSON, received:")
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
except:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
return {"errors": ["Something went wrong while calling assist"]}
|
||||
for r in results:
|
||||
r["type"] = __change_keys(r["type"])
|
||||
return {"data": results}
|
||||
|
||||
|
||||
def get_ice_servers():
|
||||
return config("iceServers") if config("iceServers", default=None) is not None \
|
||||
and len(config("iceServers")) > 0 else None
|
||||
|
||||
|
||||
def __get_efs_path():
|
||||
efs_path = config("FS_DIR")
|
||||
if not path_exists(efs_path):
|
||||
|
|
@ -242,46 +249,46 @@ def session_exists(project_id, session_id):
|
|||
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
|
||||
timeout=config("assistTimeout", cast=int, default=5))
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the peer-server code:{results.status_code}")
|
||||
print(results.text)
|
||||
return None
|
||||
results = results.json().get("data")
|
||||
if results is None:
|
||||
return False
|
||||
return True
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error("!! Timeout getting Assist response")
|
||||
print("!! Timeout getting Assist response")
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting Assist response")
|
||||
logger.exception(e)
|
||||
logger.error("expected JSON, received:")
|
||||
print("!! Issue getting Assist response")
|
||||
print(str(e))
|
||||
print("expected JSON, received:")
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
except:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
return False
|
||||
|
||||
|
||||
def __change_keys(key):
|
||||
return {
|
||||
"PAGETITLE": schemas.LiveFilterType.PAGE_TITLE.value,
|
||||
"PAGETITLE": schemas.LiveFilterType.page_title.value,
|
||||
"ACTIVE": "active",
|
||||
"LIVE": "live",
|
||||
"SESSIONID": schemas.LiveFilterType.SESSION_ID.value,
|
||||
"METADATA": schemas.LiveFilterType.METADATA.value,
|
||||
"USERID": schemas.LiveFilterType.USER_ID.value,
|
||||
"USERUUID": schemas.LiveFilterType.USER_UUID.value,
|
||||
"SESSIONID": schemas.LiveFilterType.session_id.value,
|
||||
"METADATA": schemas.LiveFilterType.metadata.value,
|
||||
"USERID": schemas.LiveFilterType.user_id.value,
|
||||
"USERUUID": schemas.LiveFilterType.user_UUID.value,
|
||||
"PROJECTKEY": "projectKey",
|
||||
"REVID": schemas.LiveFilterType.REV_ID.value,
|
||||
"REVID": schemas.LiveFilterType.rev_id.value,
|
||||
"TIMESTAMP": "timestamp",
|
||||
"TRACKERVERSION": schemas.LiveFilterType.TRACKER_VERSION.value,
|
||||
"TRACKERVERSION": schemas.LiveFilterType.tracker_version.value,
|
||||
"ISSNIPPET": "isSnippet",
|
||||
"USEROS": schemas.LiveFilterType.USER_OS.value,
|
||||
"USERBROWSER": schemas.LiveFilterType.USER_BROWSER.value,
|
||||
"USERBROWSERVERSION": schemas.LiveFilterType.USER_BROWSER_VERSION.value,
|
||||
"USERDEVICE": schemas.LiveFilterType.USER_DEVICE.value,
|
||||
"USERDEVICETYPE": schemas.LiveFilterType.USER_DEVICE_TYPE.value,
|
||||
"USERCOUNTRY": schemas.LiveFilterType.USER_COUNTRY.value,
|
||||
"USEROS": schemas.LiveFilterType.user_os.value,
|
||||
"USERBROWSER": schemas.LiveFilterType.user_browser.value,
|
||||
"USERBROWSERVERSION": schemas.LiveFilterType.user_browser_version.value,
|
||||
"USERDEVICE": schemas.LiveFilterType.user_device.value,
|
||||
"USERDEVICETYPE": schemas.LiveFilterType.user_device_type.value,
|
||||
"USERCOUNTRY": schemas.LiveFilterType.user_country.value,
|
||||
"PROJECTID": "projectId"
|
||||
}.get(key.upper(), key)
|
||||
|
|
|
|||
|
|
@ -1,96 +1,58 @@
|
|||
import logging
|
||||
|
||||
import jwt
|
||||
from decouple import config
|
||||
|
||||
from chalicelib.core import tenants
|
||||
from chalicelib.core import users, spot
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
from decouple import config
|
||||
from chalicelib.core import tenants
|
||||
from chalicelib.core import users
|
||||
|
||||
|
||||
def get_supported_audience():
|
||||
return [users.AUDIENCE, spot.AUDIENCE]
|
||||
|
||||
|
||||
def is_spot_token(token: str) -> bool:
|
||||
try:
|
||||
decoded_token = jwt.decode(token, options={"verify_signature": False, "verify_exp": False})
|
||||
audience = decoded_token.get("aud")
|
||||
return audience == spot.AUDIENCE
|
||||
except jwt.InvalidTokenError:
|
||||
logger.error(f"Invalid token for is_spot_token: {token}")
|
||||
raise
|
||||
|
||||
|
||||
def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
|
||||
if scheme.lower() != "bearer":
|
||||
def jwt_authorizer(token):
|
||||
token = token.split(" ")
|
||||
if len(token) != 2 or token[0].lower() != "bearer":
|
||||
return None
|
||||
try:
|
||||
payload = jwt.decode(jwt=token,
|
||||
key=config("JWT_SECRET") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
|
||||
algorithms=config("JWT_ALGORITHM"),
|
||||
audience=get_supported_audience(),
|
||||
leeway=leeway)
|
||||
payload = jwt.decode(
|
||||
token[1],
|
||||
config("jwt_secret"),
|
||||
algorithms=config("jwt_algorithm"),
|
||||
audience=[f"front:{helper.get_stage_name()}"]
|
||||
)
|
||||
except jwt.ExpiredSignatureError:
|
||||
logger.debug("! JWT Expired signature")
|
||||
print("! JWT Expired signature")
|
||||
return None
|
||||
except BaseException as e:
|
||||
logger.warning("! JWT Base Exception", exc_info=e)
|
||||
print("! JWT Base Exception")
|
||||
return None
|
||||
return payload
|
||||
|
||||
|
||||
def jwt_refresh_authorizer(scheme: str, token: str):
|
||||
if scheme.lower() != "bearer":
|
||||
def jwt_context(context):
|
||||
user = users.get(user_id=context["userId"], tenant_id=context["tenantId"])
|
||||
if user is None:
|
||||
return None
|
||||
try:
|
||||
payload = jwt.decode(jwt=token,
|
||||
key=config("JWT_REFRESH_SECRET") if not is_spot_token(token) \
|
||||
else config("JWT_SPOT_REFRESH_SECRET"),
|
||||
algorithms=config("JWT_ALGORITHM"),
|
||||
audience=get_supported_audience())
|
||||
except jwt.ExpiredSignatureError:
|
||||
logger.debug("! JWT-refresh Expired signature")
|
||||
return None
|
||||
except BaseException as e:
|
||||
logger.error("! JWT-refresh Base Exception", exc_info=e)
|
||||
return None
|
||||
return payload
|
||||
return {
|
||||
"tenantId": context["tenantId"],
|
||||
"userId": context["userId"],
|
||||
**user
|
||||
}
|
||||
|
||||
|
||||
def generate_jwt(user_id, tenant_id, iat, aud, for_spot=False):
|
||||
def get_jwt_exp(iat):
|
||||
return iat // 1000 + config("JWT_EXPIRATION", cast=int) + TimeUTC.get_utc_offset() // 1000
|
||||
|
||||
|
||||
def generate_jwt(id, tenant_id, iat, aud):
|
||||
token = jwt.encode(
|
||||
payload={
|
||||
"userId": user_id,
|
||||
"userId": id,
|
||||
"tenantId": tenant_id,
|
||||
"exp": iat + (config("JWT_EXPIRATION", cast=int) if not for_spot
|
||||
else config("JWT_SPOT_EXPIRATION", cast=int)),
|
||||
"exp": get_jwt_exp(iat),
|
||||
"iss": config("JWT_ISSUER"),
|
||||
"iat": iat,
|
||||
"iat": iat // 1000,
|
||||
"aud": aud
|
||||
},
|
||||
key=config("JWT_SECRET") if not for_spot else config("JWT_SPOT_SECRET"),
|
||||
algorithm=config("JWT_ALGORITHM")
|
||||
)
|
||||
return token
|
||||
|
||||
|
||||
def generate_jwt_refresh(user_id, tenant_id, iat, aud, jwt_jti, for_spot=False):
|
||||
token = jwt.encode(
|
||||
payload={
|
||||
"userId": user_id,
|
||||
"tenantId": tenant_id,
|
||||
"exp": iat + (config("JWT_REFRESH_EXPIRATION", cast=int) if not for_spot
|
||||
else config("JWT_SPOT_REFRESH_EXPIRATION", cast=int)),
|
||||
"iss": config("JWT_ISSUER"),
|
||||
"iat": iat,
|
||||
"aud": aud,
|
||||
"jti": jwt_jti
|
||||
},
|
||||
key=config("JWT_REFRESH_SECRET") if not for_spot else config("JWT_SPOT_REFRESH_SECRET"),
|
||||
algorithm=config("JWT_ALGORITHM")
|
||||
key=config("jwt_secret"),
|
||||
algorithm=config("jwt_algorithm")
|
||||
)
|
||||
return token
|
||||
|
||||
|
|
|
|||
329 api/chalicelib/core/autocomplete.py Normal file
|
|
@ -0,0 +1,329 @@
|
|||
import schemas
|
||||
from chalicelib.core import countries, events, metadata
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.event_filter_definition import Event
|
||||
|
||||
TABLE = "public.autocomplete"
|
||||
|
||||
|
||||
def __get_autocomplete_table(value, project_id):
|
||||
autocomplete_events = [schemas.FilterType.rev_id,
|
||||
schemas.EventType.click,
|
||||
schemas.FilterType.user_device,
|
||||
schemas.FilterType.user_id,
|
||||
schemas.FilterType.user_browser,
|
||||
schemas.FilterType.user_os,
|
||||
schemas.EventType.custom,
|
||||
schemas.FilterType.user_country,
|
||||
schemas.EventType.location,
|
||||
schemas.EventType.input]
|
||||
autocomplete_events.sort()
|
||||
sub_queries = []
|
||||
c_list = []
|
||||
for e in autocomplete_events:
|
||||
if e == schemas.FilterType.user_country:
|
||||
c_list = countries.get_country_code_autocomplete(value)
|
||||
if len(c_list) > 0:
|
||||
sub_queries.append(f"""(SELECT DISTINCT ON(value) '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value IN %(c_list)s)""")
|
||||
continue
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
if len(value) > 2:
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(" UNION DISTINCT ".join(sub_queries) + ";",
|
||||
{"project_id": project_id,
|
||||
"value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"c_list": tuple(c_list)
|
||||
})
|
||||
try:
|
||||
cur.execute(query)
|
||||
except Exception as err:
|
||||
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
|
||||
print(query.decode('UTF-8'))
|
||||
print("--------- VALUE -----------")
|
||||
print(value)
|
||||
print("--------------------")
|
||||
raise err
|
||||
results = cur.fetchall()
|
||||
for r in results:
|
||||
r["type"] = r.pop("_type")
|
||||
results = helper.list_to_camel_case(results)
|
||||
return results
|
||||
|
||||
|
||||
def __generic_query(typename, value_length=None):
|
||||
if typename == schemas.FilterType.user_country:
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value IN %(value)s
|
||||
ORDER BY value"""
|
||||
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""(SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5);"""
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 10;"""
|
||||
|
||||
|
||||
def __generic_autocomplete(event: Event):
|
||||
def f(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = __generic_query(event.ui_type, value_length=len(value))
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}
|
||||
cur.execute(cur.mogrify(query, params))
|
||||
return helper.list_to_camel_case(cur.fetchall())
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def __generic_autocomplete_metas(typename):
|
||||
def f(project_id, text):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
|
||||
"svalue": helper.string_to_sql_like("^" + text)}
|
||||
|
||||
if typename == schemas.FilterType.user_country:
|
||||
params["value"] = tuple(countries.get_country_code_autocomplete(text))
|
||||
if len(params["value"]) == 0:
|
||||
return []
|
||||
|
||||
query = cur.mogrify(__generic_query(typename, value_length=len(text)), params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
return rows
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def __errors_query(source=None, value_length=None):
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
|
||||
|
||||
def __search_errors(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(__errors_query(source,
|
||||
value_length=len(value)),
|
||||
{"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"source": source}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
def __search_errors_ios(project_id, value, key=None, source=None):
|
||||
if len(value) > 2:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(value)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
LIMIT 5);"""
|
||||
else:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.ERROR_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5);"""
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(query, {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
def __search_metadata(project_id, value, key=None, source=None):
|
||||
meta_keys = metadata.get(project_id=project_id)
|
||||
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
||||
if len(meta_keys) == 0 or key is not None and key not in meta_keys.keys():
|
||||
return []
|
||||
sub_from = []
|
||||
if key is not None:
|
||||
meta_keys = {key: meta_keys[key]}
|
||||
|
||||
for k in meta_keys.keys():
|
||||
colname = metadata.index_to_colname(meta_keys[k])
|
||||
if len(value) > 2:
|
||||
sub_from.append(f"""((SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
|
||||
FROM public.sessions
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} ILIKE %(svalue)s LIMIT 5)
|
||||
UNION
|
||||
(SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
|
||||
FROM public.sessions
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} ILIKE %(value)s LIMIT 5))
|
||||
""")
|
||||
else:
|
||||
sub_from.append(f"""(SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
|
||||
FROM public.sessions
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""\
|
||||
SELECT key, value, 'METADATA' AS TYPE
|
||||
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
|
||||
LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
|
@ -1,439 +0,0 @@
|
|||
import logging
|
||||
import schemas
|
||||
from chalicelib.core import countries, events, metadata
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.event_filter_definition import Event
|
||||
from chalicelib.utils.or_cache import CachedResponse
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
TABLE = "public.autocomplete"
|
||||
|
||||
|
||||
def __get_autocomplete_table(value, project_id):
|
||||
autocomplete_events = [schemas.FilterType.REV_ID,
|
||||
schemas.EventType.CLICK,
|
||||
schemas.FilterType.USER_DEVICE,
|
||||
schemas.FilterType.USER_ID,
|
||||
schemas.FilterType.USER_BROWSER,
|
||||
schemas.FilterType.USER_OS,
|
||||
schemas.EventType.CUSTOM,
|
||||
schemas.FilterType.USER_COUNTRY,
|
||||
schemas.FilterType.USER_CITY,
|
||||
schemas.FilterType.USER_STATE,
|
||||
schemas.EventType.LOCATION,
|
||||
schemas.EventType.INPUT]
|
||||
autocomplete_events.sort()
|
||||
sub_queries = []
|
||||
c_list = []
|
||||
for e in autocomplete_events:
|
||||
if e == schemas.FilterType.USER_COUNTRY:
|
||||
c_list = countries.get_country_code_autocomplete(value)
|
||||
if len(c_list) > 0:
|
||||
sub_queries.append(f"""(SELECT DISTINCT ON(value) '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value IN %(c_list)s)""")
|
||||
continue
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
if len(value) > 2:
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(" UNION DISTINCT ".join(sub_queries) + ";",
|
||||
{"project_id": project_id,
|
||||
"value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"c_list": tuple(c_list)
|
||||
})
|
||||
try:
|
||||
cur.execute(query)
|
||||
except Exception as err:
|
||||
logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
|
||||
logger.exception(query.decode('UTF-8'))
|
||||
logger.exception("--------- VALUE -----------")
|
||||
logger.exception(value)
|
||||
logger.exception("--------------------")
|
||||
raise err
|
||||
results = cur.fetchall()
|
||||
for r in results:
|
||||
r["type"] = r.pop("_type")
|
||||
results = helper.list_to_camel_case(results)
|
||||
return results
|
||||
|
||||
|
||||
def __generic_query(typename, value_length=None):
|
||||
if typename == schemas.FilterType.USER_COUNTRY:
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value IN %(value)s
|
||||
ORDER BY value"""
|
||||
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""SELECT DISTINCT ON(value,type) value, type
|
||||
((SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5)) AS raw;"""
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 10;"""
|
||||
|
||||
|
||||
def __generic_autocomplete(event: Event):
|
||||
def f(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = __generic_query(event.ui_type, value_length=len(value))
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}
|
||||
cur.execute(cur.mogrify(query, params))
|
||||
return helper.list_to_camel_case(cur.fetchall())
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def generic_autocomplete_metas(typename):
|
||||
def f(project_id, text):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
|
||||
"svalue": helper.string_to_sql_like("^" + text)}
|
||||
|
||||
if typename == schemas.FilterType.USER_COUNTRY:
|
||||
params["value"] = tuple(countries.get_country_code_autocomplete(text))
|
||||
if len(params["value"]) == 0:
|
||||
return []
|
||||
|
||||
query = cur.mogrify(__generic_query(typename, value_length=len(text)), params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
return rows
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def __errors_query(source=None, value_length=None):
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
|
||||
|
||||
def __search_errors(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(__errors_query(source,
|
||||
value_length=len(value)),
|
||||
{"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"source": source}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
def __search_errors_mobile(project_id, value, key=None, source=None):
|
||||
if len(value) > 2:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(value)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
LIMIT 5);"""
|
||||
else:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5);"""
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(query, {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
def __search_metadata(project_id, value, key=None, source=None):
    meta_keys = metadata.get(project_id=project_id)
    meta_keys = {m["key"]: m["index"] for m in meta_keys}
    if len(meta_keys) == 0 or key is not None and key not in meta_keys.keys():
        return []
    sub_from = []
    if key is not None:
        meta_keys = {key: meta_keys[key]}

    for k in meta_keys.keys():
        colname = metadata.index_to_colname(meta_keys[k])
        if len(value) > 2:
            sub_from.append(f"""((SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(svalue)s LIMIT 5)
                                 UNION
                                 (SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(value)s LIMIT 5))
                              """)
        else:
            sub_from.append(f"""(SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                 FROM public.sessions
                                 WHERE project_id = %(project_id)s
                                   AND {colname} ILIKE %(svalue)s LIMIT 5)""")
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(f"""\
                SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
                FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                              "svalue": helper.string_to_sql_like("^" + value)}))
        results = helper.list_to_camel_case(cur.fetchall())
        return results
|
||||
|
||||
|
||||
TYPE_TO_COLUMN = {
    schemas.EventType.CLICK: "label",
    schemas.EventType.INPUT: "label",
    schemas.EventType.LOCATION: "path",
    schemas.EventType.CUSTOM: "name",
    schemas.FetchFilterType.FETCH_URL: "path",
    schemas.GraphqlFilterType.GRAPHQL_NAME: "name",
    schemas.EventType.STATE_ACTION: "name",
    # For ERROR, sessions search is happening over name OR message,
    # for simplicity top 10 is using name only
    schemas.EventType.ERROR: "name",
    schemas.FilterType.USER_COUNTRY: "user_country",
    schemas.FilterType.USER_CITY: "user_city",
    schemas.FilterType.USER_STATE: "user_state",
    schemas.FilterType.USER_ID: "user_id",
    schemas.FilterType.USER_ANONYMOUS_ID: "user_anonymous_id",
    schemas.FilterType.USER_OS: "user_os",
    schemas.FilterType.USER_BROWSER: "user_browser",
    schemas.FilterType.USER_DEVICE: "user_device",
    schemas.FilterType.PLATFORM: "platform",
    schemas.FilterType.REV_ID: "rev_id",
    schemas.FilterType.REFERRER: "referrer",
    schemas.FilterType.UTM_SOURCE: "utm_source",
    schemas.FilterType.UTM_MEDIUM: "utm_medium",
    schemas.FilterType.UTM_CAMPAIGN: "utm_campaign",
}

TYPE_TO_TABLE = {
    schemas.EventType.CLICK: "events.clicks",
    schemas.EventType.INPUT: "events.inputs",
    schemas.EventType.LOCATION: "events.pages",
    schemas.EventType.CUSTOM: "events_common.customs",
    schemas.FetchFilterType.FETCH_URL: "events_common.requests",
    schemas.GraphqlFilterType.GRAPHQL_NAME: "events.graphql",
    schemas.EventType.STATE_ACTION: "events.state_actions",
}


def is_top_supported(event_type):
    return TYPE_TO_COLUMN.get(event_type, False)
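A minimal usage sketch (the project id and event type below are illustrative assumptions, not part of this diff): the TYPE_TO_COLUMN mapping above is what is_top_supported and get_top_values rely on to pick the column to aggregate.

# Sketch only — arguments are hypothetical.
if is_top_supported(schemas.EventType.CLICK):
    top_clicks = get_top_values(project_id=1, event_type=schemas.EventType.CLICK)
    # each row exposes value, rowCount and rowPercentage after camelCase conversion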
|
||||
|
||||
|
||||
@CachedResponse(table="or_cache.autocomplete_top_values", ttl=5 * 60)
|
||||
def get_top_values(project_id, event_type, event_key=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
if schemas.FilterType.has_value(event_type):
|
||||
if event_type == schemas.FilterType.METADATA \
|
||||
and (event_key is None \
|
||||
or (colname := metadata.get_colname_by_key(project_id=project_id, key=event_key)) is None) \
|
||||
or event_type != schemas.FilterType.METADATA \
|
||||
and (colname := TYPE_TO_COLUMN.get(event_type)) is None:
|
||||
return []
|
||||
|
||||
query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
|
||||
COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
|
||||
COUNT(1) OVER () AS total_count
|
||||
FROM public.sessions
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} IS NOT NULL
|
||||
AND sessions.duration IS NOT NULL
|
||||
AND sessions.duration > 0
|
||||
ORDER BY row_count DESC
|
||||
LIMIT 10)
|
||||
SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count, 2) AS row_percentage
|
||||
FROM raw;"""
|
||||
elif event_type == schemas.EventType.ERROR:
|
||||
colname = TYPE_TO_COLUMN.get(event_type)
|
||||
query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
|
||||
COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
|
||||
COUNT(1) OVER () AS total_count
|
||||
FROM public.errors
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} IS NOT NULL
|
||||
AND {colname} != ''
|
||||
ORDER BY row_count DESC
|
||||
LIMIT 10)
|
||||
SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count,2) AS row_percentage
|
||||
FROM raw;"""
|
||||
else:
|
||||
colname = TYPE_TO_COLUMN.get(event_type)
|
||||
table = TYPE_TO_TABLE.get(event_type)
|
||||
query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
|
||||
COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
|
||||
COUNT(1) OVER () AS total_count
|
||||
FROM {table} INNER JOIN public.sessions USING(session_id)
|
||||
WHERE project_id = %(project_id)s
|
||||
AND {colname} IS NOT NULL
|
||||
AND {colname} != ''
|
||||
AND sessions.duration IS NOT NULL
|
||||
AND sessions.duration > 0
|
||||
ORDER BY row_count DESC
|
||||
LIMIT 10)
|
||||
SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count,2) AS row_percentage
|
||||
FROM raw;"""
|
||||
params = {"project_id": project_id}
|
||||
query = cur.mogrify(query, params)
|
||||
logger.debug("--------------------")
|
||||
logger.debug(query)
|
||||
logger.debug("--------------------")
|
||||
cur.execute(query=query)
|
||||
results = cur.fetchall()
|
||||
return helper.list_to_camel_case(results)
|
||||
|
|
@@ -1,8 +1,7 @@
|
|||
from chalicelib.core import projects
|
||||
from chalicelib.core import users
|
||||
from chalicelib.core.log_tools import datadog, stackdriver, sentry
|
||||
from chalicelib.core.modules import TENANT_CONDITION
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
|
||||
|
||||
from chalicelib.core import users
|
||||
|
||||
|
||||
def get_state(tenant_id):
|
||||
|
|
@@ -13,61 +12,47 @@ def get_state(tenant_id):
|
|||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT EXISTS(( SELECT 1
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)},
|
||||
)
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["exists"]
|
||||
meta = False
|
||||
if recorded:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT EXISTS((SELECT 1
|
||||
cur.execute("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
|
||||
WHERE p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;""",
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
cur.execute(query)
|
||||
)) AS exists;""")
|
||||
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return [
|
||||
{
|
||||
"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
||||
},
|
||||
{
|
||||
"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
||||
},
|
||||
{
|
||||
"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users",
|
||||
},
|
||||
{
|
||||
"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations",
|
||||
},
|
||||
{"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
|
||||
{"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
|
||||
{"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users"},
|
||||
{"task": "Integrations",
|
||||
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations"}
|
||||
]
|
||||
|
||||
|
||||
|
|
@@ -78,66 +63,52 @@ def get_state_installing(tenant_id):
|
|||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT EXISTS(( SELECT 1
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)},
|
||||
)
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["exists"]
|
||||
|
||||
return {
|
||||
"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
||||
}
|
||||
return {"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start"}
|
||||
|
||||
|
||||
def get_state_identify_users(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT EXISTS((SELECT 1
|
||||
cur.execute("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
|
||||
WHERE p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;""",
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
cur.execute(query)
|
||||
)) AS exists;""")
|
||||
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return {
|
||||
"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
||||
}
|
||||
return {"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
|
||||
|
||||
|
||||
def get_state_manage_users(tenant_id):
|
||||
return {
|
||||
"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users",
|
||||
}
|
||||
return {"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users"}
|
||||
|
||||
|
||||
def get_state_integrations(tenant_id):
|
||||
return {
|
||||
"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations",
|
||||
}
|
||||
return {"task": "Integrations",
|
||||
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations"}
|
||||
|
|
|
|||
|
|
@@ -1,35 +0,0 @@
|
|||
from chalicelib.utils import pg_client
from chalicelib.utils.storage import StorageClient
from decouple import config


def get_canvas_presigned_urls(session_id, project_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify("""\
                SELECT *
                FROM events.canvas_recordings
                WHERE session_id = %(session_id)s
                ORDER BY timestamp;""",
                                {"project_id": project_id, "session_id": session_id})
                    )
        rows = cur.fetchall()
        urls = []
        for i in range(len(rows)):
            params = {
                "sessionId": session_id,
                "projectId": project_id,
                "recordingId": rows[i]["recording_id"]
            }
            oldKey = "%(sessionId)s/%(recordingId)s.mp4" % params
            key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.tar.zst") % params
            urls.append(StorageClient.get_presigned_url_for_sharing(
                bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
                expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
                key=key
            ))
            urls.append(StorageClient.get_presigned_url_for_sharing(
                bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
                expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
                key=oldKey
            ))
        return urls
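A minimal call sketch for the removed helper above, with hypothetical identifiers (not taken from the diff):

# Sketch only — session/project ids are illustrative.
urls = get_canvas_presigned_urls(session_id=123456, project_id=1)
# yields two presigned links per recording: the CANVAS_PATTERN key (.tar.zst) and the legacy .mp4 key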
|
||||
77
api/chalicelib/core/click_maps.py
Normal file
|
|
@@ -0,0 +1,77 @@
|
|||
import schemas
|
||||
from chalicelib.core import sessions_mobs, sessions_legacy as sessions_search, events
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
||||
SESSION_PROJECTION_COLS = """s.project_id,
|
||||
s.session_id::text AS session_id,
|
||||
s.user_uuid,
|
||||
s.user_id,
|
||||
s.user_os,
|
||||
s.user_browser,
|
||||
s.user_device,
|
||||
s.user_device_type,
|
||||
s.user_country,
|
||||
s.start_ts,
|
||||
s.duration,
|
||||
s.events_count,
|
||||
s.pages_count,
|
||||
s.errors_count,
|
||||
s.user_anonymous_id,
|
||||
s.platform,
|
||||
s.issue_score,
|
||||
to_jsonb(s.issue_types) AS issue_types,
|
||||
favorite_sessions.session_id NOTNULL AS favorite,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """
|
||||
|
||||
|
||||
def search_short_session(data: schemas.FlatClickMapSessionsSearch, project_id, user_id, include_mobs: bool = True):
|
||||
no_platform = True
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.platform:
|
||||
no_platform = False
|
||||
break
|
||||
if no_platform:
|
||||
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.platform,
|
||||
value=[schemas.PlatformType.desktop],
|
||||
operator=schemas.SearchEventOperator._is))
|
||||
|
||||
full_args, query_part = sessions_search.search_query_parts(data=data, error_status=None, errors_only=False,
|
||||
favorite_only=data.bookmarked, issue=None,
|
||||
project_id=project_id, user_id=user_id)
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
data.order = schemas.SortOrderType.desc
|
||||
data.sort = 'duration'
|
||||
|
||||
# meta_keys = metadata.get(project_id=project_id)
|
||||
meta_keys = []
|
||||
main_query = cur.mogrify(f"""SELECT {SESSION_PROJECTION_COLS}
|
||||
{"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
|
||||
{query_part}
|
||||
ORDER BY {data.sort} {data.order.value}
|
||||
LIMIT 1;""", full_args)
|
||||
# print("--------------------")
|
||||
# print(main_query)
|
||||
# print("--------------------")
|
||||
try:
|
||||
cur.execute(main_query)
|
||||
except Exception as err:
|
||||
print("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION -----------")
|
||||
print(main_query.decode('UTF-8'))
|
||||
print("--------- PAYLOAD -----------")
|
||||
print(data.json())
|
||||
print("--------------------")
|
||||
raise err
|
||||
|
||||
session = cur.fetchone()
|
||||
if session:
|
||||
if include_mobs:
|
||||
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
|
||||
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
|
||||
session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
|
||||
event_type=schemas.EventType.location)
|
||||
|
||||
return helper.dict_to_camel_case(session)
|
||||
|
|
@@ -26,17 +26,17 @@ class BaseCollaboration(ABC):
|
|||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def __share(cls, tenant_id, integration_id, attachments, extra=None):
|
||||
def __share(cls, tenant_id, integration_id, attachments):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, project_name=None, integration_id=None):
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, integration_id=None):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, project_name=None, integration_id=None):
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, integration_id=None):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
195
api/chalicelib/core/collaboration_msteams.py
Normal file
|
|
@@ -0,0 +1,195 @@
|
|||
import json
|
||||
|
||||
import requests
|
||||
from decouple import config
|
||||
from fastapi import HTTPException
|
||||
from starlette import status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import webhook
|
||||
from chalicelib.core.collaboration_base import BaseCollaboration
|
||||
|
||||
|
||||
class MSTeams(BaseCollaboration):
|
||||
@classmethod
|
||||
def add(cls, tenant_id, data: schemas.AddCollaborationSchema):
|
||||
if webhook.exists_by_name(tenant_id=tenant_id, name=data.name, exclude_id=None,
|
||||
webhook_type=schemas.WebhookType.msteams):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
|
||||
if cls.say_hello(data.url):
|
||||
return webhook.add(tenant_id=tenant_id,
|
||||
endpoint=data.url,
|
||||
webhook_type=schemas.WebhookType.msteams,
|
||||
name=data.name)
|
||||
return None
|
||||
|
||||
# https://messagecardplayground.azurewebsites.net
|
||||
# https://adaptivecards.io/designer/
|
||||
@classmethod
|
||||
def say_hello(cls, url):
|
||||
r = requests.post(
|
||||
url=url,
|
||||
json={
|
||||
"@type": "MessageCard",
|
||||
"@context": "https://schema.org/extensions",
|
||||
"summary": "Hello message",
|
||||
"title": "Welcome to OpenReplay"
|
||||
})
|
||||
if r.status_code != 200:
|
||||
print("MSTeams integration failed")
|
||||
print(r.text)
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def send_raw(cls, tenant_id, webhook_id, body):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=webhook_id)
|
||||
if integration is None:
|
||||
return {"errors": ["msteams integration not found"]}
|
||||
try:
|
||||
r = requests.post(
|
||||
url=integration["endpoint"],
|
||||
json=body,
|
||||
timeout=5)
|
||||
if r.status_code != 200:
|
||||
print(f"!! issue sending msteams raw; webhookId:{webhook_id} code:{r.status_code}")
|
||||
print(r.text)
|
||||
return None
|
||||
except requests.exceptions.Timeout:
|
||||
print(f"!! Timeout sending msteams raw webhookId:{webhook_id}")
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"!! Issue sending msteams raw webhookId:{webhook_id}")
|
||||
print(str(e))
|
||||
return None
|
||||
return {"data": r.text}
|
||||
|
||||
@classmethod
|
||||
def send_batch(cls, tenant_id, webhook_id, attachments):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=webhook_id)
|
||||
if integration is None:
|
||||
return {"errors": ["msteams integration not found"]}
|
||||
print(f"====> sending msteams batch notification: {len(attachments)}")
|
||||
for i in range(0, len(attachments), 100):
|
||||
print(json.dumps({"type": "message",
|
||||
"attachments": [
|
||||
{"contentType": "application/vnd.microsoft.card.adaptive",
|
||||
"contentUrl": None,
|
||||
"content": {
|
||||
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.2",
|
||||
"body": attachments[i:i + 100]}}
|
||||
]}))
|
||||
r = requests.post(
|
||||
url=integration["endpoint"],
|
||||
json={"type": "message",
|
||||
"attachments": [
|
||||
{"contentType": "application/vnd.microsoft.card.adaptive",
|
||||
"contentUrl": None,
|
||||
"content": {
|
||||
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.2",
|
||||
"body": attachments[i:i + 100]}}
|
||||
]})
|
||||
if r.status_code != 200:
|
||||
print("!!!! something went wrong")
|
||||
print(r)
|
||||
print(r.text)
|
||||
|
||||
@classmethod
|
||||
def __share(cls, tenant_id, integration_id, attachement):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=integration_id)
|
||||
if integration is None:
|
||||
return {"errors": ["Microsoft Teams integration not found"]}
|
||||
r = requests.post(
|
||||
url=integration["endpoint"],
|
||||
json={"type": "message",
|
||||
"attachments": [
|
||||
{"contentType": "application/vnd.microsoft.card.adaptive",
|
||||
"contentUrl": None,
|
||||
"content": {
|
||||
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.5",
|
||||
"body": [attachement]}}
|
||||
]
|
||||
})
|
||||
|
||||
return r.text
|
||||
|
||||
@classmethod
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, integration_id=None):
|
||||
title = f"[{user}](mailto:{user}) has shared the below session!"
|
||||
link = f"{config('SITE_URL')}/{project_id}/session/{session_id}"
|
||||
link = f"[{link}]({link})"
|
||||
args = {"type": "ColumnSet",
|
||||
"style": "emphasis",
|
||||
"separator": True,
|
||||
"bleed": True,
|
||||
"columns": [{
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{"type": "TextBlock",
|
||||
"text": title,
|
||||
"style": "heading",
|
||||
"size": "Large"},
|
||||
{"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": link}
|
||||
]
|
||||
}]}
|
||||
if comment and len(comment) > 0:
|
||||
args["columns"][0]["items"].append({
|
||||
"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": comment
|
||||
})
|
||||
data = cls.__share(tenant_id, integration_id, attachement=args)
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {"data": data}
|
||||
|
||||
@classmethod
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, integration_id=None):
|
||||
title = f"[{user}](mailto:{user}) has shared the below error!"
|
||||
link = f"{config('SITE_URL')}/{project_id}/errors/{error_id}"
|
||||
link = f"[{link}]({link})"
|
||||
args = {"type": "ColumnSet",
|
||||
"style": "emphasis",
|
||||
"separator": True,
|
||||
"bleed": True,
|
||||
"columns": [{
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{"type": "TextBlock",
|
||||
"text": title,
|
||||
"style": "heading",
|
||||
"size": "Large"},
|
||||
{"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": link}
|
||||
]
|
||||
}]}
|
||||
if comment and len(comment) > 0:
|
||||
args["columns"][0]["items"].append({
|
||||
"type": "TextBlock",
|
||||
"spacing": "small",
|
||||
"text": comment
|
||||
})
|
||||
data = cls.__share(tenant_id, integration_id, attachement=args)
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {"data": data}
|
||||
|
||||
@classmethod
|
||||
def get_integration(cls, tenant_id, integration_id=None):
|
||||
if integration_id is not None:
|
||||
return webhook.get_webhook(tenant_id=tenant_id, webhook_id=integration_id,
|
||||
webhook_type=schemas.WebhookType.msteams)
|
||||
|
||||
integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.msteams)
|
||||
if integrations is None or len(integrations) == 0:
|
||||
return None
|
||||
return integrations[0]
|
||||
|
|
@@ -1,24 +1,25 @@
|
|||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from decouple import config
|
||||
from fastapi import HTTPException, status
|
||||
from datetime import datetime
|
||||
|
||||
from fastapi import HTTPException
|
||||
from starlette import status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import webhook
|
||||
from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
|
||||
from chalicelib.core.collaboration_base import BaseCollaboration
|
||||
|
||||
|
||||
class Slack(BaseCollaboration):
|
||||
@classmethod
|
||||
def add(cls, tenant_id, data: schemas.AddCollaborationSchema):
|
||||
if webhook.exists_by_name(tenant_id=tenant_id, name=data.name, exclude_id=None,
|
||||
webhook_type=schemas.WebhookType.SLACK):
|
||||
webhook_type=schemas.WebhookType.slack):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
|
||||
if cls.say_hello(data.url):
|
||||
return webhook.add(tenant_id=tenant_id,
|
||||
endpoint=data.url.unicode_string(),
|
||||
webhook_type=schemas.WebhookType.SLACK,
|
||||
endpoint=data.url,
|
||||
webhook_type=schemas.WebhookType.slack,
|
||||
name=data.name)
|
||||
return None
|
||||
|
||||
|
|
@@ -80,18 +81,16 @@ class Slack(BaseCollaboration):
|
|||
print(r.text)
|
||||
|
||||
@classmethod
|
||||
def __share(cls, tenant_id, integration_id, attachement, extra=None):
|
||||
if extra is None:
|
||||
extra = {}
|
||||
def __share(cls, tenant_id, integration_id, attachement):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=integration_id)
|
||||
if integration is None:
|
||||
return {"errors": ["slack integration not found"]}
|
||||
attachement["ts"] = datetime.now().timestamp()
|
||||
r = requests.post(url=integration["endpoint"], json={"attachments": [attachement], **extra})
|
||||
r = requests.post(url=integration["endpoint"], json={"attachments": [attachement]})
|
||||
return r.text
|
||||
|
||||
@classmethod
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, project_name=None, integration_id=None):
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, integration_id=None):
|
||||
args = {"fallback": f"{user} has shared the below session!",
|
||||
"pretext": f"{user} has shared the below session!",
|
||||
"title": f"{config('SITE_URL')}/{project_id}/session/{session_id}",
|
||||
|
|
@@ -103,7 +102,7 @@ class Slack(BaseCollaboration):
|
|||
return {"data": data}
|
||||
|
||||
@classmethod
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, project_name=None, integration_id=None):
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, integration_id=None):
|
||||
args = {"fallback": f"{user} has shared the below error!",
|
||||
"pretext": f"{user} has shared the below error!",
|
||||
"title": f"{config('SITE_URL')}/{project_id}/errors/{error_id}",
|
||||
|
|
@@ -118,9 +117,9 @@ class Slack(BaseCollaboration):
|
|||
def get_integration(cls, tenant_id, integration_id=None):
|
||||
if integration_id is not None:
|
||||
return webhook.get_webhook(tenant_id=tenant_id, webhook_id=integration_id,
|
||||
webhook_type=schemas.WebhookType.SLACK)
|
||||
webhook_type=schemas.WebhookType.slack)
|
||||
|
||||
integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.SLACK)
|
||||
integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.slack)
|
||||
if integrations is None or len(integrations) == 0:
|
||||
return None
|
||||
return integrations[0]
|
||||
|
|
@@ -1 +0,0 @@
|
|||
from . import collaboration_base as _
|
||||
|
|
@@ -1,171 +0,0 @@
|
|||
import logging
|
||||
|
||||
import requests
|
||||
from decouple import config
|
||||
from fastapi import HTTPException, status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import webhook
|
||||
from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MSTeams(BaseCollaboration):
|
||||
@classmethod
|
||||
def add(cls, tenant_id, data: schemas.AddCollaborationSchema):
|
||||
if webhook.exists_by_name(tenant_id=tenant_id, name=data.name, exclude_id=None,
|
||||
webhook_type=schemas.WebhookType.MSTEAMS):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
|
||||
if cls.say_hello(data.url):
|
||||
return webhook.add(tenant_id=tenant_id,
|
||||
endpoint=data.url.unicode_string(),
|
||||
webhook_type=schemas.WebhookType.MSTEAMS,
|
||||
name=data.name)
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def say_hello(cls, url):
|
||||
try:
|
||||
r = requests.post(
|
||||
url=url,
|
||||
json={
|
||||
"@type": "MessageCard",
|
||||
"@context": "https://schema.org/extensions",
|
||||
"summary": "Welcome to OpenReplay",
|
||||
"title": "Welcome to OpenReplay"
|
||||
},
|
||||
timeout=3)
|
||||
if r.status_code != 200:
|
||||
logger.warning("MSTeams integration failed")
|
||||
logger.warning(r.text)
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.warning("!!! MSTeams integration failed")
|
||||
logger.exception(e)
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def send_raw(cls, tenant_id, webhook_id, body):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=webhook_id)
|
||||
if integration is None:
|
||||
return {"errors": ["msteams integration not found"]}
|
||||
try:
|
||||
r = requests.post(
|
||||
url=integration["endpoint"],
|
||||
json=body,
|
||||
timeout=5)
|
||||
if r.status_code != 200:
|
||||
logger.warning(f"!! issue sending msteams raw; webhookId:{webhook_id} code:{r.status_code}")
|
||||
logger.warning(r.text)
|
||||
return None
|
||||
except requests.exceptions.Timeout:
|
||||
logger.warning(f"!! Timeout sending msteams raw webhookId:{webhook_id}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.warning(f"!! Issue sending msteams raw webhookId:{webhook_id}")
|
||||
logger.warning(e)
|
||||
return None
|
||||
return {"data": r.text}
|
||||
|
||||
@classmethod
|
||||
def send_batch(cls, tenant_id, webhook_id, attachments):
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=webhook_id)
|
||||
if integration is None:
|
||||
return {"errors": ["msteams integration not found"]}
|
||||
logger.debug(f"====> sending msteams batch notification: {len(attachments)}")
|
||||
for i in range(0, len(attachments), 50):
|
||||
part = attachments[i:i + 50]
|
||||
for j in range(1, len(part), 2):
|
||||
part.insert(j, {"text": "***"})
|
||||
|
||||
r = requests.post(url=integration["endpoint"],
|
||||
json={
|
||||
"@type": "MessageCard",
|
||||
"@context": "http://schema.org/extensions",
|
||||
"summary": part[0]["activityTitle"],
|
||||
"sections": part
|
||||
})
|
||||
if r.status_code != 200:
|
||||
logger.warning("!!!! something went wrong")
|
||||
logger.warning(r.text)
|
||||
|
||||
@classmethod
|
||||
def __share(cls, tenant_id, integration_id, attachement, extra=None):
|
||||
if extra is None:
|
||||
extra = {}
|
||||
integration = cls.get_integration(tenant_id=tenant_id, integration_id=integration_id)
|
||||
if integration is None:
|
||||
return {"errors": ["Microsoft Teams integration not found"]}
|
||||
r = requests.post(
|
||||
url=integration["endpoint"],
|
||||
json={
|
||||
"@type": "MessageCard",
|
||||
"@context": "http://schema.org/extensions",
|
||||
"sections": [attachement],
|
||||
**extra
|
||||
})
|
||||
|
||||
return r.text
|
||||
|
||||
@classmethod
|
||||
def share_session(cls, tenant_id, project_id, session_id, user, comment, project_name=None, integration_id=None):
|
||||
title = f"*{user}* has shared the below session!"
|
||||
link = f"{config('SITE_URL')}/{project_id}/session/{session_id}"
|
||||
args = {
|
||||
"activityTitle": title,
|
||||
"facts": [
|
||||
{
|
||||
"name": "Session:",
|
||||
"value": link
|
||||
}],
|
||||
"markdown": True
|
||||
}
|
||||
if project_name and len(project_name) > 0:
|
||||
args["activitySubtitle"] = f"On Project *{project_name}*"
|
||||
if comment and len(comment) > 0:
|
||||
args["facts"].append({
|
||||
"name": "Comment:",
|
||||
"value": comment
|
||||
})
|
||||
data = cls.__share(tenant_id, integration_id, attachement=args, extra={"summary": title})
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {"data": data}
|
||||
|
||||
@classmethod
|
||||
def share_error(cls, tenant_id, project_id, error_id, user, comment, project_name=None, integration_id=None):
|
||||
title = f"*{user}* has shared the below error!"
|
||||
link = f"{config('SITE_URL')}/{project_id}/errors/{error_id}"
|
||||
args = {
|
||||
"activityTitle": title,
|
||||
"facts": [
|
||||
{
|
||||
"name": "Session:",
|
||||
"value": link
|
||||
}],
|
||||
"markdown": True
|
||||
}
|
||||
if project_name and len(project_name) > 0:
|
||||
args["activitySubtitle"] = f"On Project *{project_name}*"
|
||||
if comment and len(comment) > 0:
|
||||
args["facts"].append({
|
||||
"name": "Comment:",
|
||||
"value": comment
|
||||
})
|
||||
data = cls.__share(tenant_id, integration_id, attachement=args, extra={"summary": title})
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {"data": data}
|
||||
|
||||
@classmethod
|
||||
def get_integration(cls, tenant_id, integration_id=None):
|
||||
if integration_id is not None:
|
||||
return webhook.get_webhook(tenant_id=tenant_id, webhook_id=integration_id,
|
||||
webhook_type=schemas.WebhookType.MSTEAMS)
|
||||
|
||||
integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.MSTEAMS)
|
||||
if integrations is None or len(integrations) == 0:
|
||||
return None
|
||||
return integrations[0]
|
||||
650
api/chalicelib/core/custom_metrics.py
Normal file
|
|
@@ -0,0 +1,650 @@
|
|||
import json
|
||||
from typing import Union
|
||||
|
||||
from decouple import config
|
||||
from fastapi import HTTPException
|
||||
from starlette import status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import sessions, funnels, errors, issues, metrics, click_maps, sessions_mobs
|
||||
from chalicelib.utils import helper, pg_client, s3
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
PIE_CHART_GROUP = 5
|
||||
|
||||
|
||||
def __try_live(project_id, data: schemas.CreateCardSchema):
|
||||
results = []
|
||||
for i, s in enumerate(data.series):
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
|
||||
view_type=data.view_type, metric_type=data.metric_type,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value))
|
||||
if data.view_type == schemas.MetricTimeseriesViewType.progress:
|
||||
r = {"count": results[-1]}
|
||||
diff = s.filter.endDate - s.filter.startDate
|
||||
s.filter.endDate = s.filter.startDate
|
||||
s.filter.startDate = s.filter.endDate - diff
|
||||
r["previousCount"] = sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
|
||||
view_type=data.view_type, metric_type=data.metric_type,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value)
|
||||
r["countProgress"] = helper.__progress(old_val=r["previousCount"], new_val=r["count"])
|
||||
# r["countProgress"] = ((r["count"] - r["previousCount"]) / r["previousCount"]) * 100 \
|
||||
# if r["previousCount"] > 0 else 0
|
||||
r["seriesName"] = s.name if s.name else i + 1
|
||||
r["seriesId"] = s.series_id if s.series_id else None
|
||||
results[-1] = r
|
||||
elif data.view_type == schemas.MetricTableViewType.pie_chart:
|
||||
if len(results[i].get("values", [])) > PIE_CHART_GROUP:
|
||||
results[i]["values"] = results[i]["values"][:PIE_CHART_GROUP] \
|
||||
+ [{
|
||||
"name": "Others", "group": True,
|
||||
"sessionCount": sum(r["sessionCount"] for r in results[i]["values"][PIE_CHART_GROUP:])
|
||||
}]
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def __is_funnel_chart(data: schemas.CreateCardSchema):
|
||||
return data.metric_type == schemas.MetricType.funnel
|
||||
|
||||
|
||||
def __get_funnel_chart(project_id, data: schemas.CreateCardSchema):
|
||||
if len(data.series) == 0:
|
||||
return {
|
||||
"stages": [],
|
||||
"totalDropDueToIssues": 0
|
||||
}
|
||||
data.series[0].filter.startDate = data.startTimestamp
|
||||
data.series[0].filter.endDate = data.endTimestamp
|
||||
return funnels.get_top_insights_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
|
||||
|
||||
|
||||
def __is_errors_list(data: schemas.CreateCardSchema):
|
||||
return data.metric_type == schemas.MetricType.table \
|
||||
and data.metric_of == schemas.MetricOfTable.errors
|
||||
|
||||
|
||||
def __get_errors_list(project_id, user_id, data: schemas.CreateCardSchema):
|
||||
if len(data.series) == 0:
|
||||
return {
|
||||
"total": 0,
|
||||
"errors": []
|
||||
}
|
||||
data.series[0].filter.startDate = data.startTimestamp
|
||||
data.series[0].filter.endDate = data.endTimestamp
|
||||
data.series[0].filter.page = data.page
|
||||
data.series[0].filter.limit = data.limit
|
||||
return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def __is_sessions_list(data: schemas.CreateCardSchema):
|
||||
return data.metric_type == schemas.MetricType.table \
|
||||
and data.metric_of == schemas.MetricOfTable.sessions
|
||||
|
||||
|
||||
def __get_sessions_list(project_id, user_id, data: schemas.CreateCardSchema):
|
||||
if len(data.series) == 0:
|
||||
print("empty series")
|
||||
return {
|
||||
"total": 0,
|
||||
"sessions": []
|
||||
}
|
||||
data.series[0].filter.startDate = data.startTimestamp
|
||||
data.series[0].filter.endDate = data.endTimestamp
|
||||
data.series[0].filter.page = data.page
|
||||
data.series[0].filter.limit = data.limit
|
||||
return sessions.search_sessions(data=data.series[0].filter, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def __is_predefined(data: schemas.CreateCardSchema):
|
||||
return data.is_template
|
||||
|
||||
|
||||
def __is_click_map(data: schemas.CreateCardSchema):
|
||||
return data.metric_type == schemas.MetricType.click_map
|
||||
|
||||
|
||||
def __get_click_map_chart(project_id, user_id, data: schemas.CreateCardSchema, include_mobs: bool = True):
|
||||
if len(data.series) == 0:
|
||||
return None
|
||||
data.series[0].filter.startDate = data.startTimestamp
|
||||
data.series[0].filter.endDate = data.endTimestamp
|
||||
return click_maps.search_short_session(project_id=project_id, user_id=user_id,
|
||||
data=schemas.FlatClickMapSessionsSearch(**data.series[0].filter.dict()),
|
||||
include_mobs=include_mobs)
|
||||
|
||||
|
||||
def merged_live(project_id, data: schemas.CreateCardSchema, user_id=None):
|
||||
if data.is_template:
|
||||
return get_predefined_metric(key=data.metric_of, project_id=project_id, data=data.dict())
|
||||
elif __is_funnel_chart(data):
|
||||
return __get_funnel_chart(project_id=project_id, data=data)
|
||||
elif __is_errors_list(data):
|
||||
return __get_errors_list(project_id=project_id, user_id=user_id, data=data)
|
||||
elif __is_sessions_list(data):
|
||||
return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)
|
||||
elif __is_click_map(data):
|
||||
return __get_click_map_chart(project_id=project_id, user_id=user_id, data=data)
|
||||
elif len(data.series) == 0:
|
||||
return []
|
||||
series_charts = __try_live(project_id=project_id, data=data)
|
||||
if data.view_type == schemas.MetricTimeseriesViewType.progress or data.metric_type == schemas.MetricType.table:
|
||||
return series_charts
|
||||
results = [{}] * len(series_charts[0])
|
||||
for i in range(len(results)):
|
||||
for j, series_chart in enumerate(series_charts):
|
||||
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
|
||||
data.series[j].name if data.series[j].name else j + 1: series_chart[i]["count"]}
|
||||
return results
|
||||
|
||||
|
||||
def __merge_metric_with_data(metric: schemas.CreateCardSchema,
|
||||
data: schemas.CardChartSchema) -> schemas.CreateCardSchema:
|
||||
if data.series is not None and len(data.series) > 0:
|
||||
metric.series = data.series
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(
|
||||
**{**data.dict(by_alias=True), **metric.dict(by_alias=True)})
|
||||
if len(data.filters) > 0 or len(data.events) > 0:
|
||||
for s in metric.series:
|
||||
if len(data.filters) > 0:
|
||||
s.filter.filters += data.filters
|
||||
if len(data.events) > 0:
|
||||
s.filter.events += data.events
|
||||
metric.limit = data.limit
|
||||
metric.page = data.page
|
||||
metric.startTimestamp = data.startTimestamp
|
||||
metric.endTimestamp = data.endTimestamp
|
||||
return metric
|
||||
|
||||
|
||||
def make_chart(project_id, user_id, data: schemas.CardChartSchema, metric: schemas.CreateCardSchema):
|
||||
if metric is None:
|
||||
return None
|
||||
metric: schemas.CreateCardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
|
||||
return merged_live(project_id=project_id, data=metric, user_id=user_id)
|
||||
|
||||
|
||||
def get_sessions(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
# raw_metric = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False, include_data=True)
|
||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
if raw_metric is None:
|
||||
return None
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(**raw_metric)
|
||||
metric: schemas.CreateCardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if metric is None:
|
||||
return None
|
||||
results = []
|
||||
# is_click_map = False
|
||||
# if __is_click_map(metric) and raw_metric.get("data") is not None:
|
||||
# is_click_map = True
|
||||
for s in metric.series:
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
# if is_click_map:
|
||||
# results.append(
|
||||
# {"seriesId": s.series_id, "seriesName": s.name, "total": 1, "sessions": [raw_metric["data"]]})
|
||||
# break
|
||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
if raw_metric is None:
|
||||
return None
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(**raw_metric)
|
||||
metric: schemas.CreateCardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if metric is None:
|
||||
return None
|
||||
for s in metric.series:
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||
**funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}
|
||||
|
||||
|
||||
def get_errors_list(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
if raw_metric is None:
|
||||
return None
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(**raw_metric)
|
||||
metric: schemas.CreateCardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if metric is None:
|
||||
return None
|
||||
for s in metric.series:
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||
**errors.search(data=s.filter, project_id=project_id, user_id=user_id)}
|
||||
|
||||
|
||||
def try_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||
results = []
|
||||
if data.series is None:
|
||||
return results
|
||||
for s in data.series:
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
if len(data.filters) > 0:
|
||||
s.filter.filters += data.filters
|
||||
if len(data.events) > 0:
|
||||
s.filter.events += data.events
|
||||
results.append({"seriesId": None, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def create(project_id, user_id, data: schemas.CreateCardSchema, dashboard=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
session_data = None
|
||||
if __is_click_map(data):
|
||||
session_data = __get_click_map_chart(project_id=project_id, user_id=user_id,
|
||||
data=data, include_mobs=False)
|
||||
if session_data is not None:
|
||||
session_data = json.dumps(session_data)
|
||||
_data = {"session_data": session_data}
|
||||
for i, s in enumerate(data.series):
|
||||
for k in s.dict().keys():
|
||||
_data[f"{k}_{i}"] = s.__getattribute__(k)
|
||||
_data[f"index_{i}"] = i
|
||||
_data[f"filter_{i}"] = s.filter.json()
|
||||
series_len = len(data.series)
|
||||
params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data}
|
||||
params["default_config"] = json.dumps(data.default_config.dict())
|
||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||
view_type, metric_type, metric_of, metric_value,
|
||||
metric_format, default_config, thumbnail, data)
|
||||
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
|
||||
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
|
||||
%(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s)
|
||||
RETURNING metric_id"""
|
||||
if len(data.series) > 0:
|
||||
query = f"""WITH m AS ({query})
|
||||
INSERT INTO metric_series(metric_id, index, name, filter)
|
||||
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
|
||||
for i in range(series_len)])}
|
||||
RETURNING metric_id;"""
|
||||
|
||||
query = cur.mogrify(query, params)
|
||||
# print("-------")
|
||||
# print(query)
|
||||
# print("-------")
|
||||
cur.execute(query)
|
||||
r = cur.fetchone()
|
||||
if dashboard:
|
||||
return r["metric_id"]
|
||||
return {"data": get_card(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
|
||||
|
||||
|
||||
def update(metric_id, user_id, project_id, data: schemas.UpdateCardSchema):
|
||||
metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
if metric is None:
|
||||
return None
|
||||
series_ids = [r["seriesId"] for r in metric["series"]]
|
||||
n_series = []
|
||||
d_series_ids = []
|
||||
u_series = []
|
||||
u_series_ids = []
|
||||
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
|
||||
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
|
||||
"metric_type": data.metric_type, "metric_of": data.metric_of,
|
||||
"metric_value": data.metric_value, "metric_format": data.metric_format,
|
||||
"config": json.dumps(data.default_config.dict()), "thumbnail": data.thumbnail}
|
||||
for i, s in enumerate(data.series):
|
||||
prefix = "u_"
|
||||
if s.index is None:
|
||||
s.index = i
|
||||
if s.series_id is None or s.series_id not in series_ids:
|
||||
n_series.append({"i": i, "s": s})
|
||||
prefix = "n_"
|
||||
else:
|
||||
u_series.append({"i": i, "s": s})
|
||||
u_series_ids.append(s.series_id)
|
||||
ns = s.dict()
|
||||
for k in ns.keys():
|
||||
if k == "filter":
|
||||
ns[k] = json.dumps(ns[k])
|
||||
params[f"{prefix}{k}_{i}"] = ns[k]
|
||||
for i in series_ids:
|
||||
if i not in u_series_ids:
|
||||
d_series_ids.append(i)
|
||||
params["d_series_ids"] = tuple(d_series_ids)
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
sub_queries = []
|
||||
if len(n_series) > 0:
|
||||
sub_queries.append(f"""\
|
||||
n AS (INSERT INTO metric_series (metric_id, index, name, filter)
|
||||
VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
|
||||
for s in n_series])}
|
||||
RETURNING 1)""")
|
||||
if len(u_series) > 0:
|
||||
sub_queries.append(f"""\
|
||||
u AS (UPDATE metric_series
|
||||
SET name=series.name,
|
||||
filter=series.filter,
|
||||
index=series.index
|
||||
FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
|
||||
for s in u_series])}) AS series(series_id, index, name, filter)
|
||||
WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
|
||||
RETURNING 1)""")
|
||||
if len(d_series_ids) > 0:
|
||||
sub_queries.append("""\
|
||||
d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
|
||||
RETURNING 1)""")
|
||||
query = cur.mogrify(f"""\
|
||||
{"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
|
||||
UPDATE metrics
|
||||
SET name = %(name)s, is_public= %(is_public)s,
|
||||
view_type= %(view_type)s, metric_type= %(metric_type)s,
|
||||
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
|
||||
metric_format= %(metric_format)s,
|
||||
edited_at = timezone('utc'::text, now()),
|
||||
default_config = %(config)s,
|
||||
thumbnail = %(thumbnail)s
|
||||
WHERE metric_id = %(metric_id)s
|
||||
AND project_id = %(project_id)s
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
RETURNING metric_id;""", params)
|
||||
cur.execute(query)
|
||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
||||
constraints = ["metrics.project_id = %(project_id)s",
|
||||
"metrics.deleted_at ISNULL"]
|
||||
params = {"project_id": project_id, "user_id": user_id,
|
||||
"offset": (data.page - 1) * data.limit,
|
||||
"limit": data.limit, }
|
||||
if data.mine_only:
|
||||
constraints.append("user_id = %(user_id)s")
|
||||
else:
|
||||
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
||||
if data.shared_only:
|
||||
constraints.append("is_public")
|
||||
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
|
||||
params["query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator._contains)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
sub_join = ""
|
||||
if include_series:
|
||||
sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)"""
|
||||
query = cur.mogrify(
|
||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
||||
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
||||
dashboards, owner_email, default_config AS config, thumbnail
|
||||
FROM metrics
|
||||
{sub_join}
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||
FROM (SELECT DISTINCT dashboard_id, name, is_public
|
||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND dashboard_widgets.metric_id = metrics.metric_id
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE {" AND ".join(constraints)}
|
||||
ORDER BY created_at {data.order.value}
|
||||
LIMIT %(limit)s OFFSET %(offset)s;""", params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
if include_series:
|
||||
for r in rows:
|
||||
for s in r["series"]:
|
||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||
else:
|
||||
for r in rows:
|
||||
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
||||
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
return rows
|
||||
|
||||
|
||||
def get_all(project_id, user_id):
|
||||
default_search = schemas.SearchCardsSchema()
|
||||
result = rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||
while len(rows) == default_search.limit:
|
||||
default_search.page += 1
|
||||
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||
result += rows
|
||||
|
||||
return result
|
||||
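# Illustrative sketch -- not from the original module; ids below are placeholders.
# search_all() is driven by SearchCardsSchema.page/limit, and get_all() above just
# keeps requesting the next page until one comes back shorter than the limit.
# A caller that only needs the first two pages could follow the same contract:
def _example_first_two_pages(project_id, user_id):
    search = schemas.SearchCardsSchema()  # defaults include page=1 and a limit
    first = search_all(project_id=project_id, user_id=user_id, data=search)
    search.page += 1
    second = search_all(project_id=project_id, user_id=user_id, data=search)
    return first + second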
|
||||
|
||||
def delete(project_id, metric_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
UPDATE public.metrics
|
||||
SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
|
||||
WHERE project_id = %(project_id)s
|
||||
AND metric_id = %(metric_id)s
|
||||
AND (user_id = %(user_id)s OR is_public);""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
||||
)
|
||||
|
||||
return {"state": "success"}
|
||||
|
||||
|
||||
def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data: bool = False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, deleted_at, edited_at, metric_type,
|
||||
view_type, metric_of, metric_value, metric_format, is_pinned, default_config,
|
||||
default_config AS config,series, dashboards, owner_email
|
||||
{',data' if include_data else ''}
|
||||
FROM metrics
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||
FROM (SELECT dashboard_id, name, is_public
|
||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||
AND metric_id = %(metric_id)s) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE metrics.project_id = %(project_id)s
|
||||
AND metrics.deleted_at ISNULL
|
||||
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
||||
AND metrics.metric_id = %(metric_id)s
|
||||
ORDER BY created_at;""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||
row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
|
||||
if flatten:
|
||||
for s in row["series"]:
|
||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def get_series_for_alert(project_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT series_id AS value,
|
||||
metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
|
||||
'count' AS unit,
|
||||
FALSE AS predefined,
|
||||
metric_id,
|
||||
series_id
|
||||
FROM metric_series
|
||||
INNER JOIN metrics USING (metric_id)
|
||||
WHERE metrics.deleted_at ISNULL
|
||||
AND metrics.project_id = %(project_id)s
|
||||
AND metrics.metric_type = 'timeseries'
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
ORDER BY name;""",
|
||||
{"project_id": project_id, "user_id": user_id}
|
||||
)
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
return helper.list_to_camel_case(rows)
|
||||
|
||||
|
||||
def change_state(project_id, metric_id, user_id, status):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
UPDATE public.metrics
|
||||
SET active = %(status)s
|
||||
WHERE metric_id = %(metric_id)s
|
||||
AND (user_id = %(user_id)s OR is_public);""",
|
||||
{"metric_id": metric_id, "status": status, "user_id": user_id})
|
||||
)
|
||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
||||
data: schemas.CardSessionsSchema
|
||||
# , range_value=None, start_date=None, end_date=None
|
||||
):
|
||||
metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
if metric is None:
|
||||
return None
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(**metric)
|
||||
metric: schemas.CreateCardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if metric is None:
|
||||
return None
|
||||
for s in metric.series:
|
||||
s.filter.startDate = data.startTimestamp
|
||||
s.filter.endDate = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
|
||||
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
|
||||
issue = None
|
||||
for i in issues_list:
|
||||
if i.get("issueId", "") == issue_id:
|
||||
issue = i
|
||||
break
|
||||
if issue is None:
|
||||
issue = issues.get(project_id=project_id, issue_id=issue_id)
|
||||
if issue is not None:
|
||||
issue = {**issue,
|
||||
"affectedSessions": 0,
|
||||
"affectedUsers": 0,
|
||||
"conversionImpact": 0,
|
||||
"lostConversions": 0,
|
||||
"unaffectedSessions": 0}
|
||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
|
||||
issue=issue, data=s.filter)
|
||||
if issue is not None else {"total": 0, "sessions": []},
|
||||
"issue": issue}
|
||||
|
||||
|
||||
def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardChartSchema):
|
||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, include_data=True)
|
||||
if raw_metric is None:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
|
||||
metric: schemas.CreateCardSchema = schemas.CreateCardSchema(**raw_metric)
|
||||
if metric.is_template:
|
||||
return get_predefined_metric(key=metric.metric_of, project_id=project_id, data=data.dict())
|
||||
elif __is_click_map(metric):
|
||||
if raw_metric["data"]:
|
||||
keys = sessions_mobs. \
|
||||
__get_mob_keys(project_id=project_id, session_id=raw_metric["data"]["sessionId"])
|
||||
mob_exists = False
|
||||
for k in keys:
|
||||
if s3.exists(bucket=config("sessions_bucket"), key=k):
|
||||
mob_exists = True
|
||||
break
|
||||
if mob_exists:
|
||||
raw_metric["data"]['domURL'] = sessions_mobs.get_urls(session_id=raw_metric["data"]["sessionId"],
|
||||
project_id=project_id)
|
||||
raw_metric["data"]['mobsUrl'] = sessions_mobs.get_urls_depercated(
|
||||
session_id=raw_metric["data"]["sessionId"])
|
||||
return raw_metric["data"]
|
||||
|
||||
return make_chart(project_id=project_id, user_id=user_id, data=data, metric=metric)
|
||||
|
||||
|
||||
PREDEFINED = {schemas.MetricOfWebVitals.count_sessions: metrics.get_processed_sessions,
|
||||
schemas.MetricOfWebVitals.avg_image_load_time: metrics.get_application_activity_avg_image_load_time,
|
||||
schemas.MetricOfWebVitals.avg_page_load_time: metrics.get_application_activity_avg_page_load_time,
|
||||
schemas.MetricOfWebVitals.avg_request_load_time: metrics.get_application_activity_avg_request_load_time,
|
||||
schemas.MetricOfWebVitals.avg_dom_content_load_start: metrics.get_page_metrics_avg_dom_content_load_start,
|
||||
schemas.MetricOfWebVitals.avg_first_contentful_pixel: metrics.get_page_metrics_avg_first_contentful_pixel,
|
||||
schemas.MetricOfWebVitals.avg_visited_pages: metrics.get_user_activity_avg_visited_pages,
|
||||
schemas.MetricOfWebVitals.avg_session_duration: metrics.get_user_activity_avg_session_duration,
|
||||
schemas.MetricOfWebVitals.avg_pages_dom_buildtime: metrics.get_pages_dom_build_time,
|
||||
schemas.MetricOfWebVitals.avg_pages_response_time: metrics.get_pages_response_time,
|
||||
schemas.MetricOfWebVitals.avg_response_time: metrics.get_top_metrics_avg_response_time,
|
||||
schemas.MetricOfWebVitals.avg_first_paint: metrics.get_top_metrics_avg_first_paint,
|
||||
schemas.MetricOfWebVitals.avg_dom_content_loaded: metrics.get_top_metrics_avg_dom_content_loaded,
|
||||
schemas.MetricOfWebVitals.avg_till_first_byte: metrics.get_top_metrics_avg_till_first_bit,
|
||||
schemas.MetricOfWebVitals.avg_time_to_interactive: metrics.get_top_metrics_avg_time_to_interactive,
|
||||
schemas.MetricOfWebVitals.count_requests: metrics.get_top_metrics_count_requests,
|
||||
schemas.MetricOfWebVitals.avg_time_to_render: metrics.get_time_to_render,
|
||||
schemas.MetricOfWebVitals.avg_used_js_heap_size: metrics.get_memory_consumption,
|
||||
schemas.MetricOfWebVitals.avg_cpu: metrics.get_avg_cpu,
|
||||
schemas.MetricOfWebVitals.avg_fps: metrics.get_avg_fps,
|
||||
schemas.MetricOfErrors.impacted_sessions_by_js_errors: metrics.get_impacted_sessions_by_js_errors,
|
||||
schemas.MetricOfErrors.domains_errors_4xx: metrics.get_domains_errors_4xx,
|
||||
schemas.MetricOfErrors.domains_errors_5xx: metrics.get_domains_errors_5xx,
|
||||
schemas.MetricOfErrors.errors_per_domains: metrics.get_errors_per_domains,
|
||||
schemas.MetricOfErrors.calls_errors: metrics.get_calls_errors,
|
||||
schemas.MetricOfErrors.errors_per_type: metrics.get_errors_per_type,
|
||||
schemas.MetricOfErrors.resources_by_party: metrics.get_resources_by_party,
|
||||
schemas.MetricOfPerformance.speed_location: metrics.get_speed_index_location,
|
||||
schemas.MetricOfPerformance.slowest_domains: metrics.get_slowest_domains,
|
||||
schemas.MetricOfPerformance.sessions_per_browser: metrics.get_sessions_per_browser,
|
||||
schemas.MetricOfPerformance.time_to_render: metrics.get_time_to_render,
|
||||
schemas.MetricOfPerformance.impacted_sessions_by_slow_pages: metrics.get_impacted_sessions_by_slow_pages,
|
||||
schemas.MetricOfPerformance.memory_consumption: metrics.get_memory_consumption,
|
||||
schemas.MetricOfPerformance.cpu: metrics.get_avg_cpu,
|
||||
schemas.MetricOfPerformance.fps: metrics.get_avg_fps,
|
||||
schemas.MetricOfPerformance.crashes: metrics.get_crashes,
|
||||
schemas.MetricOfPerformance.resources_vs_visually_complete: metrics.get_resources_vs_visually_complete,
|
||||
schemas.MetricOfPerformance.pages_dom_buildtime: metrics.get_pages_dom_build_time,
|
||||
schemas.MetricOfPerformance.pages_response_time: metrics.get_pages_response_time,
|
||||
schemas.MetricOfPerformance.pages_response_time_distribution: metrics.get_pages_response_time_distribution,
|
||||
schemas.MetricOfResources.missing_resources: metrics.get_missing_resources_trend,
|
||||
schemas.MetricOfResources.slowest_resources: metrics.get_slowest_resources,
|
||||
schemas.MetricOfResources.resources_loading_time: metrics.get_resources_loading_time,
|
||||
schemas.MetricOfResources.resource_type_vs_response_end: metrics.resource_type_vs_response_end,
|
||||
schemas.MetricOfResources.resources_count_by_type: metrics.get_resources_count_by_type, }
|
||||
|
||||
|
||||
def get_predefined_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors, \
|
||||
schemas.MetricOfPerformance, schemas.MetricOfResources], project_id: int, data: dict):
|
||||
return PREDEFINED.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)
|
||||
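# Illustrative sketch -- not from the original module. PREDEFINED maps a metric key
# to the chalicelib.core.metrics function that computes it; get_predefined_metric()
# simply dispatches and forwards the payload as keyword arguments. The exact keys
# accepted by the metric helpers (timestamps, density, ...) are assumptions here.
def _example_predefined_dispatch(project_id: int, chart_payload: dict):
    return get_predefined_metric(key=schemas.MetricOfWebVitals.count_sessions,
                                 project_id=project_id, data=chart_payload)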
|
|
@@ -1,7 +1,7 @@
|
|||
import json
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.metrics import custom_metrics
|
||||
from chalicelib.core import custom_metrics
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
|
@@ -12,7 +12,7 @@ def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):
|
|||
pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned, description)
|
||||
VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s, %(description)s)
|
||||
RETURNING *"""
|
||||
params = {"userId": user_id, "projectId": project_id, **data.model_dump()}
|
||||
params = {"userId": user_id, "projectId": project_id, **data.dict()}
|
||||
if data.metrics is not None and len(data.metrics) > 0:
|
||||
pg_query = f"""WITH dash AS ({pg_query})
|
||||
INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
|
||||
|
|
@@ -34,13 +34,8 @@ def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):
|
|||
|
||||
def get_dashboards(project_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT *, owner_email, owner_name
|
||||
pg_query = f"""SELECT *
|
||||
FROM dashboards
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email, name AS owner_name
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = dashboards.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE deleted_at ISNULL
|
||||
AND project_id = %(projectId)s
|
||||
AND (user_id = %(userId)s OR is_public);"""
|
||||
|
|
@@ -114,7 +109,7 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
|
|||
pg_query = """SELECT COALESCE(COUNT(*),0) AS count
|
||||
FROM dashboard_widgets
|
||||
WHERE dashboard_id = %(dashboard_id)s;"""
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.model_dump()}
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.dict()}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
row = cur.fetchone()
|
||||
offset = row["count"]
|
||||
|
|
@@ -126,14 +121,12 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
|
|||
WHERE dashboards.project_id = %(projectId)s
|
||||
AND dashboard_id = %(dashboard_id)s
|
||||
AND (dashboards.user_id = %(userId)s OR is_public)
|
||||
RETURNING dashboard_id,name,description,is_public,created_at"""
|
||||
RETURNING dashboard_id,name,description,is_public,created_at;"""
|
||||
if data.metrics is not None and len(data.metrics) > 0:
|
||||
pg_query = f"""WITH dash AS ({pg_query})
|
||||
INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
|
||||
VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
|
||||
RETURNING (SELECT dashboard_id FROM dash),(SELECT name FROM dash),
|
||||
(SELECT description FROM dash),(SELECT is_public FROM dash),
|
||||
(SELECT created_at FROM dash);"""
|
||||
RETURNING dash.*;"""
|
||||
for i, m in enumerate(data.metrics):
|
||||
params[f"metric_id_{i}"] = m
|
||||
# params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \
|
||||
|
|
@@ -149,6 +142,30 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
|
|||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def get_widget(project_id, user_id, dashboard_id, widget_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = """SELECT metrics.*, metric_series.series
|
||||
FROM dashboard_widgets
|
||||
INNER JOIN dashboards USING (dashboard_id)
|
||||
INNER JOIN metrics USING (metric_id)
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)
|
||||
WHERE dashboard_id = %(dashboard_id)s
|
||||
AND widget_id = %(widget_id)s
|
||||
AND (dashboards.is_public OR dashboards.user_id = %(userId)s)
|
||||
AND dashboards.deleted_at IS NULL
|
||||
AND metrics.deleted_at ISNULL
|
||||
AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)
|
||||
AND (metrics.is_public OR metrics.user_id = %(userId)s);"""
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
row = cur.fetchone()
|
||||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashboardPayloadSchema):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = """INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
|
||||
|
|
@@ -159,7 +176,7 @@ def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashb
|
|||
AND dashboard_id = %(dashboard_id)s
|
||||
AND (dashboards.user_id = %(userId)s OR is_public))
|
||||
RETURNING *;"""
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.model_dump()}
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.dict()}
|
||||
params["config"] = json.dumps(data.config)
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
row = cur.fetchone()
|
||||
|
|
@@ -173,7 +190,7 @@ def update_widget(project_id, user_id, dashboard_id, widget_id, data: schemas.Up
|
|||
WHERE dashboard_id=%(dashboard_id)s AND widget_id=%(widget_id)s
|
||||
RETURNING *;"""
|
||||
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id,
|
||||
"widget_id": widget_id, **data.model_dump()}
|
||||
"widget_id": widget_id, **data.dict()}
|
||||
params["config"] = json.dumps(data.config)
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
row = cur.fetchone()
|
||||
|
|
@@ -204,9 +221,9 @@ def pin_dashboard(project_id, user_id, dashboard_id):
|
|||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def create_metric_add_widget(project: schemas.ProjectContext, user_id, dashboard_id, data: schemas.CardSchema):
|
||||
metric_id = custom_metrics.create_card(project=project, user_id=user_id, data=data, dashboard=True)
|
||||
return add_widget(project_id=project.project_id, user_id=user_id, dashboard_id=dashboard_id,
|
||||
def create_metric_add_widget(project_id, user_id, dashboard_id, data: schemas.CreateCardSchema):
|
||||
metric_id = custom_metrics.create(project_id=project_id, user_id=user_id, data=data, dashboard=True)
|
||||
return add_widget(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id,
|
||||
data=schemas.AddWidgetToDashboardPayloadSchema(metricId=metric_id))
|
||||
|
||||
# def make_chart_widget(dashboard_id, project_id, user_id, widget_id, data: schemas.CardChartSchema):
|
||||
|
|
@@ -215,7 +232,7 @@ def create_metric_add_widget(project: schemas.ProjectContext, user_id, dashboard
|
|||
# return None
|
||||
# metric = schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate(**raw_metric)
|
||||
# if metric.is_template:
|
||||
# return get_predefined_metric(key=metric.predefined_key, project_id=project_id, data=data.model_dump())
|
||||
# return get_predefined_metric(key=metric.predefined_key, project_id=project_id, data=data.dict())
|
||||
# else:
|
||||
# return custom_metrics.make_chart(project_id=project_id, user_id=user_id, metric_id=raw_metric["metricId"],
|
||||
# data=data, metric=raw_metric)
|
||||
|
|
@@ -1,205 +0,0 @@
|
|||
import logging
|
||||
|
||||
from chalicelib.utils import pg_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DatabaseRequestHandler:
|
||||
def __init__(self, table_name):
|
||||
self.table_name = table_name
|
||||
self.constraints = []
|
||||
self.params = {}
|
||||
self.order_clause = ""
|
||||
self.sort_clause = ""
|
||||
self.select_columns = []
|
||||
self.sub_queries = []
|
||||
self.joins = []
|
||||
self.group_by_clause = ""
|
||||
self.client = pg_client
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.pagination = {}
|
||||
|
||||
def add_constraint(self, constraint, param=None):
|
||||
self.constraints.append(constraint)
|
||||
if param:
|
||||
self.params.update(param)
|
||||
|
||||
def add_subquery(self, subquery, alias, param=None):
|
||||
self.sub_queries.append((subquery, alias))
|
||||
if param:
|
||||
self.params.update(param)
|
||||
|
||||
def add_join(self, join_clause):
|
||||
self.joins.append(join_clause)
|
||||
|
||||
def add_param(self, key, value):
|
||||
self.params[key] = value
|
||||
|
||||
def set_order_by(self, order_by):
|
||||
self.order_clause = order_by
|
||||
|
||||
def set_sort_by(self, sort_by):
|
||||
self.sort_clause = sort_by
|
||||
|
||||
def set_select_columns(self, columns):
|
||||
self.select_columns = columns
|
||||
|
||||
def set_group_by(self, group_by_clause):
|
||||
self.group_by_clause = group_by_clause
|
||||
|
||||
def set_pagination(self, page, page_size):
|
||||
"""
|
||||
Set pagination parameters for the query.
|
||||
:param page: The page number (1-indexed)
|
||||
:param page_size: Number of items per page
|
||||
"""
|
||||
self.pagination = {
|
||||
'offset': (page - 1) * page_size,
|
||||
'limit': page_size
|
||||
}
|
||||
|
||||
def build_query(self, action="select", additional_clauses=None, data=None):
|
||||
|
||||
if action == "select":
|
||||
query = f"SELECT {', '.join(self.select_columns)} FROM {self.table_name}"
|
||||
elif action == "insert":
|
||||
columns = ', '.join(data.keys())
|
||||
placeholders = ', '.join(f'%({k})s' for k in data.keys())
|
||||
query = f"INSERT INTO {self.table_name} ({columns}) VALUES ({placeholders})"
|
||||
elif action == "update":
|
||||
set_clause = ', '.join(f"{k} = %({k})s" for k in data.keys())
|
||||
query = f"UPDATE {self.table_name} SET {set_clause}"
|
||||
elif action == "delete":
|
||||
query = f"DELETE FROM {self.table_name}"
|
||||
|
||||
for join in self.joins:
|
||||
query += f" {join}"
|
||||
for subquery, alias in self.sub_queries:
|
||||
query += f", ({subquery}) AS {alias}"
|
||||
if self.constraints:
|
||||
query += " WHERE " + " AND ".join(self.constraints)
|
||||
if action == "select":
|
||||
if self.group_by_clause:
|
||||
query += " GROUP BY " + self.group_by_clause
|
||||
if self.sort_clause:
|
||||
query += " ORDER BY " + self.sort_clause
|
||||
if self.order_clause:
|
||||
query += " " + self.order_clause
|
||||
if hasattr(self, 'pagination') and self.pagination:
|
||||
query += " LIMIT %(limit)s OFFSET %(offset)s"
|
||||
self.params.update(self.pagination)
|
||||
|
||||
if additional_clauses:
|
||||
query += " " + additional_clauses
|
||||
|
||||
logger.debug(f"Query: {query}")
|
||||
return query
|
||||
|
||||
def execute_query(self, query, data=None):
|
||||
try:
|
||||
with self.client.PostgresClient() as cur:
|
||||
mogrified_query = cur.mogrify(query, {**data, **self.params} if data else self.params)
|
||||
cur.execute(mogrified_query)
|
||||
return cur.fetchall() if cur.description else None
|
||||
except Exception as e:
|
||||
self.logger.error(f"Database operation failed: {e}")
|
||||
raise
|
||||
|
||||
def fetchall(self):
|
||||
query = self.build_query()
|
||||
return self.execute_query(query)
|
||||
|
||||
def fetchone(self):
|
||||
query = self.build_query()
|
||||
result = self.execute_query(query)
|
||||
return result[0] if result else None
|
||||
|
||||
def insert(self, data):
|
||||
query = self.build_query(action="insert", data=data)
|
||||
query += " RETURNING *;"
|
||||
|
||||
result = self.execute_query(query, data)
|
||||
return result[0] if result else None
|
||||
|
||||
def update(self, data):
|
||||
query = self.build_query(action="update", data=data)
|
||||
query += " RETURNING *;"
|
||||
|
||||
result = self.execute_query(query, data)
|
||||
return result[0] if result else None
|
||||
|
||||
def delete(self):
|
||||
query = self.build_query(action="delete")
|
||||
return self.execute_query(query)
|
||||
|
||||
def batch_insert(self, items):
|
||||
if not items:
|
||||
return None
|
||||
|
||||
columns = ', '.join(items[0].keys())
|
||||
|
||||
# Building a values string with unique parameter names for each item
|
||||
all_values_query = ', '.join(
|
||||
'(' + ', '.join([f"%({key}_{i})s" for key in item]) + ')'
|
||||
for i, item in enumerate(items)
|
||||
)
|
||||
|
||||
query = f"INSERT INTO {self.table_name} ({columns}) VALUES {all_values_query} RETURNING *;"
|
||||
|
||||
try:
|
||||
with self.client.PostgresClient() as cur:
|
||||
# Flatten items into a single dictionary with unique keys
|
||||
combined_params = {f"{k}_{i}": v for i, item in enumerate(items) for k, v in item.items()}
|
||||
mogrified_query = cur.mogrify(query, combined_params)
|
||||
cur.execute(mogrified_query)
|
||||
return cur.fetchall()
|
||||
except Exception as e:
|
||||
self.logger.error(f"Database batch insert operation failed: {e}")
|
||||
raise
|
||||
|
||||
def raw_query(self, query, params=None):
|
||||
try:
|
||||
with self.client.PostgresClient() as cur:
|
||||
mogrified_query = cur.mogrify(query, params)
|
||||
cur.execute(mogrified_query)
|
||||
return cur.fetchall() if cur.description else None
|
||||
except Exception as e:
|
||||
self.logger.error(f"Database operation failed: {e}")
|
||||
raise
|
||||
|
||||
def batch_update(self, items):
|
||||
if not items:
|
||||
return None
|
||||
|
||||
id_column = list(items[0])[0]
|
||||
|
||||
# Building the set clause for the update statement
|
||||
update_columns = list(items[0].keys())
|
||||
update_columns.remove(id_column)
|
||||
set_clause = ', '.join([f"{col} = v.{col}" for col in update_columns])
|
||||
|
||||
# Building the values part for the 'VALUES' section
|
||||
values_rows = []
|
||||
for item in items:
|
||||
values = ', '.join([f"%({key})s" for key in item.keys()])
|
||||
values_rows.append(f"({values})")
|
||||
values_query = ', '.join(values_rows)
|
||||
|
||||
# Constructing the full update query
|
||||
query = f"""
|
||||
UPDATE {self.table_name} AS t
|
||||
SET {set_clause}
|
||||
FROM (VALUES {values_query}) AS v ({', '.join(items[0].keys())})
|
||||
WHERE t.{id_column} = v.{id_column};
|
||||
"""
|
||||
|
||||
try:
|
||||
with self.client.PostgresClient() as cur:
|
||||
# Flatten items into a single dictionary for mogrify
|
||||
combined_params = {k: v for item in items for k, v in item.items()}
|
||||
mogrified_query = cur.mogrify(query, combined_params)
|
||||
cur.execute(mogrified_query)
|
||||
except Exception as e:
|
||||
self.logger.error(f"Database batch update operation failed: {e}")
|
||||
raise
|
||||
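# Illustrative usage sketch -- not from the original module; table and column names
# below are placeholders. DatabaseRequestHandler accumulates SQL fragments and
# parameters, assembles one parameterised query in build_query(), and only hits the
# database when fetchall()/fetchone()/insert()/update() are called.
def _example_paginated_select(project_id, page=1, page_size=20):
    handler = DatabaseRequestHandler("public.metrics")
    handler.set_select_columns(["metric_id", "name", "created_at"])
    handler.add_constraint("project_id = %(project_id)s", {"project_id": project_id})
    handler.add_constraint("deleted_at ISNULL")
    handler.set_sort_by("created_at")
    handler.set_order_by("DESC")
    handler.set_pagination(page=page, page_size=page_size)
    return handler.fetchall()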
749
api/chalicelib/core/errors.py
Normal file
|
|
@@ -0,0 +1,749 @@
|
|||
import json
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import sourcemaps, sessions
|
||||
from chalicelib.utils import errors_helper
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import __get_step_size
|
||||
|
||||
|
||||
def get(error_id, family=False):
|
||||
if family:
|
||||
return get_batch([error_id])
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
|
||||
{"error_id": error_id})
|
||||
cur.execute(query=query)
|
||||
result = cur.fetchone()
|
||||
if result is not None:
|
||||
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
||||
def get_batch(error_ids):
|
||||
if len(error_ids) == 0:
|
||||
return []
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""
|
||||
WITH RECURSIVE error_family AS (
|
||||
SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id IN %(error_ids)s
|
||||
UNION
|
||||
SELECT child_errors.*
|
||||
FROM public.errors AS child_errors
|
||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
||||
)
|
||||
SELECT *
|
||||
FROM error_family;""",
|
||||
{"error_ids": tuple(error_ids)})
|
||||
cur.execute(query=query)
|
||||
errors = cur.fetchall()
|
||||
for e in errors:
|
||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
||||
|
||||
def __flatten_sort_key_count_version(data, merge_nested=False):
|
||||
if data is None:
|
||||
return []
|
||||
return sorted(
|
||||
[
|
||||
{
|
||||
"name": f'{o["name"]}@{v["version"]}',
|
||||
"count": v["count"]
|
||||
} for o in data for v in o["partition"]
|
||||
],
|
||||
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
||||
[
|
||||
{
|
||||
"name": o["name"],
|
||||
"count": o["count"],
|
||||
} for o in data
|
||||
]
|
||||
|
||||
|
||||
def __process_tags(row):
|
||||
return [
|
||||
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
||||
{"name": "browser.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
||||
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
||||
{"name": "OS.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
||||
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
||||
{"name": "device",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
||||
{"name": "country", "partitions": row.pop("country_partition")}
|
||||
]
|
||||
|
||||
|
||||
def get_details(project_id, error_id, user_id, **data):
|
||||
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
||||
pg_sub_query24.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="sessions.project_id")
|
||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="errors.project_id")
|
||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err.append("source ='js_exception'")
|
||||
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
||||
pg_sub_query30.append("error_id = %(error_id)s")
|
||||
pg_basic_query = __get_basic_constraints(time_constraint=False)
|
||||
pg_basic_query.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
data["startDate24"] = TimeUTC.now(-1)
|
||||
data["endDate24"] = TimeUTC.now()
|
||||
data["startDate30"] = TimeUTC.now(-30)
|
||||
data["endDate30"] = TimeUTC.now()
|
||||
density24 = int(data.get("density24", 24))
|
||||
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
||||
density30 = int(data.get("density30", 30))
|
||||
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
||||
params = {
|
||||
"startDate24": data['startDate24'],
|
||||
"endDate24": data['endDate24'],
|
||||
"startDate30": data['startDate30'],
|
||||
"endDate30": data['endDate30'],
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size24": step_size24,
|
||||
"step_size30": step_size30,
|
||||
"error_id": error_id}
|
||||
|
||||
main_pg_query = f"""\
|
||||
SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
last_session_id,
|
||||
browsers_partition,
|
||||
os_partition,
|
||||
device_partition,
|
||||
country_partition,
|
||||
chart24,
|
||||
chart30,
|
||||
custom_tags
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions
|
||||
FROM public.errors
|
||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||
GROUP BY error_id, name, message) AS details
|
||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s
|
||||
GROUP BY error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN (SELECT session_id AS last_session_id,
|
||||
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||
FROM events.errors
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||
FROM errors_tags
|
||||
WHERE errors_tags.error_id = %(error_id)s
|
||||
AND errors_tags.session_id = errors.session_id
|
||||
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||
WHERE error_id = %(error_id)s
|
||||
ORDER BY errors.timestamp DESC
|
||||
LIMIT 1) AS last_session_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS version_details
|
||||
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_details
|
||||
GROUP BY count_per_os_details.name ) AS os_version_details
|
||||
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||
FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device
|
||||
ORDER BY count DESC) AS count_per_device_v_details
|
||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query24)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||
ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||
"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
|
||||
query = cur.mogrify(
|
||||
f"""SELECT error_id, status, session_id, start_ts,
|
||||
parent_error_id,session_id, user_anonymous_id,
|
||||
user_id, user_uuid, user_browser, user_browser_version,
|
||||
user_os, user_os_version, user_device, payload,
|
||||
FALSE AS favorite,
|
||||
True AS viewed
|
||||
FROM public.errors AS pe
|
||||
INNER JOIN events.errors AS ee USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE pe.project_id = %(project_id)s
|
||||
AND error_id = %(error_id)s
|
||||
ORDER BY start_ts DESC
|
||||
LIMIT 1;""",
|
||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
status = cur.fetchone()
|
||||
|
||||
if status is not None:
|
||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||
row["status"] = status.pop("status")
|
||||
row["parent_error_id"] = status.pop("parent_error_id")
|
||||
row["favorite"] = status.pop("favorite")
|
||||
row["viewed"] = status.pop("viewed")
|
||||
row["last_hydrated_session"] = status
|
||||
else:
|
||||
row["stack"] = []
|
||||
row["last_hydrated_session"] = None
|
||||
row["status"] = "untracked"
|
||||
row["parent_error_id"] = None
|
||||
row["favorite"] = False
|
||||
row["viewed"] = False
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
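# Illustrative sketch -- not from the original module. The chart24/chart30 columns
# above come from LEFT JOINing each bucket of generate_series(start, end, step_size)
# against the matching error sessions, so empty buckets still show up with count 0.
# The equivalent bucketing in Python, assuming __get_step_size() is roughly
# (end - start) // density:
def _example_empty_chart_buckets(start_ts, end_ts, density=24):
    step = max(1, (end_ts - start_ts) // density)  # assumption about the step size
    return [{"timestamp": t, "count": 0} for t in range(start_ts, end_ts, step)]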
|
||||
|
||||
def get_details_chart(project_id, error_id, user_id, **data):
|
||||
pg_sub_query = __get_basic_constraints()
|
||||
pg_sub_query.append("error_id = %(error_id)s")
|
||||
pg_sub_query_chart = __get_basic_constraints(time_constraint=False, chart=True)
|
||||
pg_sub_query_chart.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
if data.get("startDate") is None:
|
||||
data["startDate"] = TimeUTC.now(-7)
|
||||
else:
|
||||
data["startDate"] = int(data["startDate"])
|
||||
if data.get("endDate") is None:
|
||||
data["endDate"] = TimeUTC.now()
|
||||
else:
|
||||
data["endDate"] = int(data["endDate"])
|
||||
density = int(data.get("density", 7))
|
||||
step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
|
||||
params = {
|
||||
"startDate": data['startDate'],
|
||||
"endDate": data['endDate'],
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size,
|
||||
"error_id": error_id}
|
||||
|
||||
main_pg_query = f"""\
|
||||
SELECT %(error_id)s AS error_id,
|
||||
browsers_partition,
|
||||
os_partition,
|
||||
device_partition,
|
||||
country_partition,
|
||||
chart
|
||||
FROM (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS count_per_version_details) AS browser_version_details
|
||||
ON (TRUE)) AS browser_details) AS browser_details
|
||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_query) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_query
|
||||
) AS os_version_query ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_details) AS partition
|
||||
FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device_type, user_device
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
) AS device_version_details ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
||||
|
||||
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
|
||||
project_key="project_id"):
|
||||
if project_key is None:
|
||||
ch_sub_query = []
|
||||
else:
|
||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
||||
f"timestamp < %({endTime_arg_name})s"]
|
||||
if chart:
|
||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
||||
if platform == schemas.PlatformType.mobile:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.desktop:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
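# Illustrative sketch -- not from the original module; values are placeholders.
# __get_basic_constraints() only returns a list of SQL fragments; callers join them
# with " AND " inside a WHERE clause and pass the matching %(...)s parameters to
# cur.mogrify().
def _example_constraints_to_where(project_id, start_ts, end_ts):
    constraints = __get_basic_constraints(platform=None, time_constraint=True)
    where_clause = " AND ".join(constraints)
    params = {"project_id": project_id, "startDate": start_ts, "endDate": end_ts}
    return f"SELECT COUNT(*) FROM events.errors WHERE {where_clause};", params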
|
||||
|
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.occurrence: "max_datetime",
|
||||
schemas.ErrorSort.users_count: "users",
|
||||
schemas.ErrorSort.sessions_count: "sessions"
|
||||
}.get(key, 'max_datetime')
|
||||
|
||||
|
||||
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
|
||||
empty_response = {
|
||||
'total': 0,
|
||||
'errors': []
|
||||
}
|
||||
|
||||
platform = None
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.platform and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||
"pe.project_id=%(project_id)s"]
|
||||
# Exclude the generic cross-origin "Script error." message from results
|
||||
pg_sub_query.append("pe.message!='Script error.'")
|
||||
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
|
||||
# pg_sub_query_chart.append("source ='js_exception'")
|
||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||
statuses = []
|
||||
error_ids = None
|
||||
if data.startDate is None:
|
||||
data.startDate = TimeUTC.now(-30)
|
||||
if data.endDate is None:
|
||||
data.endDate = TimeUTC.now(1)
|
||||
if len(data.events) > 0 or len(data.filters) > 0:
|
||||
print("-- searching for sessions before errors")
|
||||
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
|
||||
error_status=data.status)
|
||||
if len(statuses) == 0:
|
||||
return empty_response
|
||||
error_ids = [e["errorId"] for e in statuses]
|
||||
with pg_client.PostgresClient() as cur:
|
||||
step_size = __get_step_size(data.startDate, data.endDate, data.density, factor=1)
|
||||
sort = __get_sort_key('datetime')
|
||||
if data.sort is not None:
|
||||
sort = __get_sort_key(data.sort)
|
||||
order = schemas.SortOrderType.desc.value
|
||||
if data.order is not None:
|
||||
order = data.order.value
|
||||
extra_join = ""
|
||||
|
||||
params = {
|
||||
"startDate": data.startDate,
|
||||
"endDate": data.endDate,
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size}
|
||||
if data.status != schemas.ErrorStatus.all:
|
||||
pg_sub_query.append("status = %(error_status)s")
|
||||
params["error_status"] = data.status
|
||||
if data.limit is not None and data.page is not None:
|
||||
params["errors_offset"] = (data.page - 1) * data.limit
|
||||
params["errors_limit"] = data.limit
|
||||
else:
|
||||
params["errors_offset"] = 0
|
||||
params["errors_limit"] = 200
|
||||
|
||||
if error_ids is not None:
|
||||
params["error_ids"] = tuple(error_ids)
|
||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||
# if data.bookmarked:
|
||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator._contains)
|
||||
|
||||
main_pg_query = f"""SELECT full_count,
|
||||
error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
chart
|
||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions,
|
||||
MAX(timestamp) AS max_datetime,
|
||||
MIN(timestamp) AS min_datetime
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS pe USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
{extra_join}
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}) AS details
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||
) AS details
|
||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
||||
|
||||
if total == 0:
|
||||
rows = []
|
||||
else:
|
||||
if len(statuses) == 0:
|
||||
query = cur.mogrify(
|
||||
"""SELECT error_id,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_errors AS ve
|
||||
WHERE errors.error_id = ve.error_id
|
||||
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
|
||||
FROM public.errors
|
||||
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
||||
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
||||
"user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
statuses = helper.list_to_camel_case(cur.fetchall())
|
||||
statuses = {
|
||||
s["errorId"]: s for s in statuses
|
||||
}
|
||||
|
||||
for r in rows:
|
||||
r.pop("full_count")
|
||||
if r["error_id"] in statuses:
|
||||
r["viewed"] = statuses[r["error_id"]]["viewed"]
|
||||
else:
|
||||
r["viewed"] = False
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'errors': helper.list_to_camel_case(rows)
|
||||
}
|
||||
|
||||
|
||||
def __save_stacktrace(error_id, data):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.errors
|
||||
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
||||
WHERE error_id = %(error_id)s;""",
|
||||
{"error_id": error_id, "data": json.dumps(data)})
|
||||
cur.execute(query=query)
|
||||
|
||||
|
||||
def get_trace(project_id, error_id):
|
||||
error = get(error_id=error_id, family=False)
|
||||
if error is None:
|
||||
return {"errors": ["error not found"]}
|
||||
if error.get("source", "") != "js_exception":
|
||||
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
||||
if error.get("payload") is None:
|
||||
return {"errors": ["null payload"]}
|
||||
if error.get("stacktrace") is not None:
|
||||
return {"sourcemapUploaded": True,
|
||||
"trace": error.get("stacktrace"),
|
||||
"preparsed": True}
|
||||
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
||||
if all_exists:
|
||||
__save_stacktrace(error_id=error_id, data=trace)
|
||||
return {"sourcemapUploaded": all_exists,
|
||||
"trace": trace,
|
||||
"preparsed": False}
|
||||
|
||||
|
||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||
extra_constraints = ["s.project_id = %(project_id)s",
|
||||
"s.start_ts >= %(startDate)s",
|
||||
"s.start_ts <= %(endDate)s",
|
||||
"e.error_id = %(error_id)s"]
|
||||
if start_date is None:
|
||||
start_date = TimeUTC.now(-7)
|
||||
if end_date is None:
|
||||
end_date = TimeUTC.now()
|
||||
|
||||
params = {
|
||||
"startDate": start_date,
|
||||
"endDate": end_date,
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"error_id": error_id}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT s.project_id,
|
||||
s.session_id::text AS session_id,
|
||||
s.user_uuid,
|
||||
s.user_id,
|
||||
s.user_agent,
|
||||
s.user_os,
|
||||
s.user_browser,
|
||||
s.user_device,
|
||||
s.user_country,
|
||||
s.start_ts,
|
||||
s.duration,
|
||||
s.events_count,
|
||||
s.pages_count,
|
||||
s.errors_count,
|
||||
s.issue_types,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_favorite_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
||||
WHERE {" AND ".join(extra_constraints)}
|
||||
ORDER BY s.start_ts DESC;""",
|
||||
params)
|
||||
cur.execute(query=query)
|
||||
sessions_list = []
|
||||
total = cur.rowcount
|
||||
row = cur.fetchone()
|
||||
while row is not None and len(sessions_list) < 100:
|
||||
sessions_list.append(row)
|
||||
row = cur.fetchone()
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'sessions': helper.list_to_camel_case(sessions_list)
|
||||
}
|
||||
|
||||
|
||||
ACTION_STATE = {
|
||||
"unsolve": 'unresolved',
|
||||
"solve": 'resolved',
|
||||
"ignore": 'ignored'
|
||||
}
|
||||
|
||||
|
||||
def change_state(project_id, user_id, error_id, action):
|
||||
errors = get(error_id, family=True)
|
||||
print(len(errors))
|
||||
status = ACTION_STATE.get(action)
|
||||
if errors is None or len(errors) == 0:
|
||||
return {"errors": ["error not found"]}
|
||||
if errors[0]["status"] == status:
|
||||
return {"errors": [f"error is already {status}"]}
|
||||
|
||||
if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
|
||||
return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}
|
||||
|
||||
params = {
|
||||
"userId": user_id,
|
||||
"error_ids": tuple([e["errorId"] for e in errors]),
|
||||
"status": status}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.errors
|
||||
SET status = %(status)s
|
||||
WHERE error_id IN %(error_ids)s
|
||||
RETURNING status""",
|
||||
params)
|
||||
cur.execute(query=query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
for e in errors:
|
||||
e["status"] = row["status"]
|
||||
return {"data": errors}
|
||||
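# Illustrative sketch -- not from the original module; ids below are placeholders.
# change_state() maps the API action to a stored status through ACTION_STATE,
# rejects a resolved -> ignored transition, and applies every other transition to
# the whole error family returned by get(error_id, family=True).
def _example_resolve_error(project_id, user_id, error_id):
    return change_state(project_id=project_id, user_id=user_id,
                        error_id=error_id, action="solve")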
|
||||
|
||||
MAX_RANK = 2
|
||||
|
||||
|
||||
def __status_rank(status):
|
||||
return {
|
||||
'unresolved': MAX_RANK - 2,
|
||||
'ignored': MAX_RANK - 1,
|
||||
'resolved': MAX_RANK
|
||||
}.get(status)
|
||||
|
||||
|
||||
def stats(project_id, user_id, startTimestamp=TimeUTC.now(delta_days=-7), endTimestamp=TimeUTC.now()):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""WITH user_viewed AS (SELECT error_id FROM public.user_viewed_errors WHERE user_id = %(user_id)s)
|
||||
SELECT COUNT(timed_errors.*) AS unresolved_and_unviewed
|
||||
FROM (SELECT root_error.error_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS root_error USING (error_id)
|
||||
LEFT JOIN user_viewed USING (error_id)
|
||||
WHERE project_id = %(project_id)s
|
||||
AND timestamp >= %(startTimestamp)s
|
||||
AND timestamp <= %(endTimestamp)s
|
||||
AND source = 'js_exception'
|
||||
AND root_error.status = 'unresolved'
|
||||
AND user_viewed.error_id ISNULL
|
||||
LIMIT 1
|
||||
) AS timed_errors;""",
|
||||
{"project_id": project_id, "user_id": user_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp})
|
||||
cur.execute(query=query)
|
||||
row = cur.fetchone()
|
||||
|
||||
return {
|
||||
"data": helper.dict_to_camel_case(row)
|
||||
}
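Worth noting: the LIMIT 1 inside the timed_errors subquery caps the count at one, so in practice this behaves as a has-any flag rather than a full count. A rough illustration of the response shape, assuming dict_to_camel_case camel-cases the column name:

stats(project_id=1, user_id=7)
# -> {"data": {"unresolvedAndUnviewed": 1}}  # or 0 when nothing matches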
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
import logging

from decouple import config

logger = logging.getLogger(__name__)

from . import errors_pg as errors_legacy

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    logger.info(">>> Using experimental error search")
    from . import errors_ch as errors
else:
    from . import errors_pg as errors
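Callers can stay backend-agnostic by importing the package-level alias; a hypothetical caller (not part of the diff) would look like this:

from chalicelib.core.errors import errors  # resolves to errors_ch or errors_pg at import time

def search_errors(data, project, user_id):
    return errors.search(data=data, project=project, user_id=user_id)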
|
||||
|
|
@ -1,409 +0,0 @@
|
|||
import schemas
|
||||
from chalicelib.core import metadata
|
||||
from chalicelib.core.errors import errors_legacy
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.core.errors.modules import sessions
|
||||
from chalicelib.utils import ch_client, exp_ch_helper
|
||||
from chalicelib.utils import helper, metrics_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def _multiple_values(values, value_key="value"):
|
||||
query_values = {}
|
||||
if values is not None and isinstance(values, list):
|
||||
for i in range(len(values)):
|
||||
k = f"{value_key}_{i}"
|
||||
query_values[k] = values[i]
|
||||
return query_values
|
||||
|
||||
|
||||
def __get_sql_operator(op: schemas.SearchEventOperator):
|
||||
return {
|
||||
schemas.SearchEventOperator.IS: "=",
|
||||
schemas.SearchEventOperator.IS_ANY: "IN",
|
||||
schemas.SearchEventOperator.ON: "=",
|
||||
schemas.SearchEventOperator.ON_ANY: "IN",
|
||||
schemas.SearchEventOperator.IS_NOT: "!=",
|
||||
schemas.SearchEventOperator.NOT_ON: "!=",
|
||||
schemas.SearchEventOperator.CONTAINS: "ILIKE",
|
||||
schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
|
||||
schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
|
||||
schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
|
||||
}.get(op, "=")
|
||||
|
||||
|
||||
def _isAny_opreator(op: schemas.SearchEventOperator):
|
||||
return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]
|
||||
|
||||
|
||||
def _isUndefined_operator(op: schemas.SearchEventOperator):
|
||||
return op in [schemas.SearchEventOperator.IS_UNDEFINED]
|
||||
|
||||
|
||||
def __is_negation_operator(op: schemas.SearchEventOperator):
|
||||
return op in [schemas.SearchEventOperator.IS_NOT,
|
||||
schemas.SearchEventOperator.NOT_ON,
|
||||
schemas.SearchEventOperator.NOT_CONTAINS]
|
||||
|
||||
|
||||
def _multiple_conditions(condition, values, value_key="value", is_not=False):
|
||||
query = []
|
||||
for i in range(len(values)):
|
||||
k = f"{value_key}_{i}"
|
||||
query.append(condition.replace(value_key, k))
|
||||
return "(" + (" AND " if is_not else " OR ").join(query) + ")"
|
||||
|
||||
|
||||
def get(error_id, family=False):
|
||||
return errors_legacy.get(error_id=error_id, family=family)
|
||||
|
||||
|
||||
def get_batch(error_ids):
|
||||
return errors_legacy.get_batch(error_ids=error_ids)
|
||||
|
||||
|
||||
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
|
||||
table_name=None):
|
||||
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
|
||||
if table_name is not None:
|
||||
table_name = table_name + "."
|
||||
else:
|
||||
table_name = ""
|
||||
if type_condition:
|
||||
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
|
||||
f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
|
||||
# if platform == schemas.PlatformType.MOBILE:
|
||||
# ch_sub_query.append("user_device_type = 'mobile'")
|
||||
# elif platform == schemas.PlatformType.DESKTOP:
|
||||
# ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||
schemas.ErrorSort.USERS_COUNT: "users",
|
||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
||||
}.get(key, 'max_datetime')
|
||||
|
||||
|
||||
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
||||
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
|
||||
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)
|
||||
|
||||
platform = None
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
|
||||
# ignore platform for errors table
|
||||
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
|
||||
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
|
||||
|
||||
# To ignore Script error
|
||||
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
|
||||
error_ids = None
|
||||
|
||||
if data.startTimestamp is None:
|
||||
data.startTimestamp = TimeUTC.now(-7)
|
||||
if data.endTimestamp is None:
|
||||
data.endTimestamp = TimeUTC.now(1)
|
||||
|
||||
subquery_part = ""
|
||||
params = {}
|
||||
if len(data.events) > 0:
|
||||
errors_condition_count = 0
|
||||
for i, e in enumerate(data.events):
|
||||
if e.type == schemas.EventType.ERROR:
|
||||
errors_condition_count += 1
|
||||
is_any = _isAny_opreator(e.operator)
|
||||
op = __get_sql_operator(e.operator)
|
||||
e_k = f"e_value{i}"
|
||||
params = {**params, **_multiple_values(e.value, value_key=e_k)}
|
||||
if not is_any and len(e.value) > 0 and e.value[1] not in [None, "*", ""]:
|
||||
ch_sub_query.append(
|
||||
_multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
|
||||
e.value, value_key=e_k))
|
||||
if len(data.events) > errors_condition_count:
|
||||
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
|
||||
errors_only=True,
|
||||
project_id=project.project_id,
|
||||
user_id=user_id,
|
||||
issue=None,
|
||||
favorite_only=False)
|
||||
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
|
||||
params = {**params, **subquery_part_args}
|
||||
if len(data.filters) > 0:
|
||||
meta_keys = None
|
||||
# include a sub-query of sessions inside the events query in order to reduce the amount of selected data
|
||||
for i, f in enumerate(data.filters):
|
||||
if not isinstance(f.value, list):
|
||||
f.value = [f.value]
|
||||
filter_type = f.type
|
||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||
f_k = f"f_value{i}"
|
||||
params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
|
||||
op = __get_sql_operator(f.operator) \
|
||||
if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
|
||||
is_any = _isAny_opreator(f.operator)
|
||||
is_undefined = _isUndefined_operator(f.operator)
|
||||
if not is_any and not is_undefined and len(f.value) == 0:
|
||||
continue
|
||||
is_not = False
|
||||
if __is_negation_operator(f.operator):
|
||||
is_not = True
|
||||
if filter_type == schemas.FilterType.USER_BROWSER:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_browser)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_os)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_device)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_country)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.UTM_SOURCE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.utm_source)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.utm_source)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.utm_medium)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.utm_campaign)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type == schemas.FilterType.DURATION:
|
||||
if len(f.value) > 0 and f.value[0] is not None:
|
||||
ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
|
||||
params["minDuration"] = f.value[0]
|
||||
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
|
||||
ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
|
||||
params["maxDuration"] = f.value[1]
|
||||
|
||||
elif filter_type == schemas.FilterType.REFERRER:
|
||||
# extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
|
||||
if is_any:
|
||||
referrer_constraint = 'isNotNull(s.base_referrer)'
|
||||
else:
|
||||
referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
|
||||
is_not=is_not, value_key=f_k)
|
||||
elif filter_type == schemas.FilterType.METADATA:
|
||||
# get metadata list only if you need it
|
||||
if meta_keys is None:
|
||||
meta_keys = metadata.get(project_id=project.project_id)
|
||||
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
||||
if f.source in meta_keys.keys():
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(
|
||||
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
|
||||
f.value, is_not=is_not, value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_id)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.user_id)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
|
||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
|
||||
is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
|
||||
if is_any:
|
||||
ch_sessions_sub_query.append('isNotNull(s.rev_id)')
|
||||
elif is_undefined:
|
||||
ch_sessions_sub_query.append('isNull(s.rev_id)')
|
||||
else:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
elif filter_type == schemas.FilterType.PLATFORM:
|
||||
# op = __get_sql_operator(f.operator)
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
# elif filter_type == schemas.FilterType.issue:
|
||||
# if is_any:
|
||||
# ch_sessions_sub_query.append("notEmpty(s.issue_types)")
|
||||
# else:
|
||||
# ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
|
||||
# # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
|
||||
# # value_key=f_k))
|
||||
#
|
||||
# if is_not:
|
||||
# extra_constraints[-1] = f"not({extra_constraints[-1]})"
|
||||
# ss_constraints[-1] = f"not({ss_constraints[-1]})"
|
||||
elif filter_type == schemas.FilterType.EVENTS_COUNT:
|
||||
ch_sessions_sub_query.append(
|
||||
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
|
||||
value_key=f_k))
|
||||
|
||||
with ch_client.ClickHouseClient() as ch:
|
||||
step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
|
||||
sort = __get_sort_key('datetime')
|
||||
if data.sort is not None:
|
||||
sort = __get_sort_key(data.sort)
|
||||
order = "DESC"
|
||||
if data.order is not None:
|
||||
order = data.order
|
||||
params = {
|
||||
**params,
|
||||
"startDate": data.startTimestamp,
|
||||
"endDate": data.endTimestamp,
|
||||
"project_id": project.project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size}
|
||||
if data.limit is not None and data.page is not None:
|
||||
params["errors_offset"] = (data.page - 1) * data.limit
|
||||
params["errors_limit"] = data.limit
|
||||
else:
|
||||
params["errors_offset"] = 0
|
||||
params["errors_limit"] = 200
|
||||
# if data.bookmarked:
|
||||
# cur.execute(cur.mogrify(f"""SELECT error_id
|
||||
# FROM public.user_favorite_errors
|
||||
# WHERE user_id = %(userId)s
|
||||
# {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
|
||||
# {"userId": user_id, "error_ids": tuple(error_ids or [])}))
|
||||
# error_ids = cur.fetchall()
|
||||
# if len(error_ids) == 0:
|
||||
# return empty_response
|
||||
# error_ids = [e["error_id"] for e in error_ids]
|
||||
|
||||
if error_ids is not None:
|
||||
params["error_ids"] = tuple(error_ids)
|
||||
ch_sub_query.append("error_id IN %(error_ids)s")
|
||||
|
||||
main_ch_query = f"""\
|
||||
SELECT details.error_id as error_id,
|
||||
name, message, users, total,
|
||||
sessions, last_occurrence, first_occurrence, chart
|
||||
FROM (SELECT error_id,
|
||||
JSONExtractString(toString(`$properties`), 'name') AS name,
|
||||
JSONExtractString(toString(`$properties`), 'message') AS message,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT events.session_id) AS sessions,
|
||||
MAX(created_at) AS max_datetime,
|
||||
MIN(created_at) AS min_datetime,
|
||||
COUNT(DISTINCT error_id)
|
||||
OVER() AS total
|
||||
FROM {MAIN_EVENTS_TABLE} AS events
|
||||
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
|
||||
FROM {MAIN_SESSIONS_TABLE} AS s
|
||||
{subquery_part}
|
||||
WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
|
||||
ON (events.session_id = sessions.session_id)
|
||||
WHERE {" AND ".join(ch_sub_query)}
|
||||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
|
||||
INNER JOIN (SELECT error_id,
|
||||
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
|
||||
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
|
||||
FROM {MAIN_EVENTS_TABLE}
|
||||
WHERE project_id=%(project_id)s
|
||||
AND `$event_name`='ERROR'
|
||||
GROUP BY error_id) AS time_details
|
||||
ON details.error_id=time_details.error_id
|
||||
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
|
||||
FROM (SELECT error_id,
|
||||
gs.generate_series AS timestamp,
|
||||
COUNT(DISTINCT session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
|
||||
LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
|
||||
WHERE {" AND ".join(ch_sub_query)}
|
||||
AND created_at >= toDateTime(timestamp / 1000)
|
||||
AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
|
||||
GROUP BY error_id, timestamp
|
||||
ORDER BY timestamp) AS sub_table
|
||||
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
|
||||
|
||||
# print("------------")
|
||||
# print(ch.format(main_ch_query, params))
|
||||
# print("------------")
|
||||
query = ch.format(query=main_ch_query, parameters=params)
|
||||
|
||||
rows = ch.execute(query=query)
|
||||
total = rows[0]["total"] if len(rows) > 0 else 0
|
||||
|
||||
for r in rows:
|
||||
r["chart"] = list(r["chart"])
|
||||
for i in range(len(r["chart"])):
|
||||
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'errors': helper.list_to_camel_case(rows)
|
||||
}
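ClickHouse returns the per-error chart as an array of [timestamp, count] pairs (from groupArray), which the loop above reshapes into objects for the API. A sketch with an assumed row:

row = {"errorId": "e1", "chart": [[1714000000000, 3], [1714003600000, 0]]}
row["chart"] = [{"timestamp": ts, "count": c} for ts, c in row["chart"]]
# -> [{"timestamp": 1714000000000, "count": 3}, {"timestamp": 1714003600000, "count": 0}]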
|
||||
|
||||
|
||||
def get_trace(project_id, error_id):
|
||||
return errors_legacy.get_trace(project_id=project_id, error_id=error_id)
|
||||
|
||||
|
||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||
return errors_legacy.get_sessions(start_date=start_date,
|
||||
end_date=end_date,
|
||||
project_id=project_id,
|
||||
user_id=user_id,
|
||||
error_id=error_id)
|
||||
|
|
@ -1,248 +0,0 @@
|
|||
from chalicelib.core.errors.modules import errors_helper
|
||||
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
||||
|
||||
def __flatten_sort_key_count_version(data, merge_nested=False):
|
||||
if data is None:
|
||||
return []
|
||||
return sorted(
|
||||
[
|
||||
{
|
||||
"name": f'{o["name"]}@{v["version"]}',
|
||||
"count": v["count"]
|
||||
} for o in data for v in o["partition"]
|
||||
],
|
||||
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
||||
[
|
||||
{
|
||||
"name": o["name"],
|
||||
"count": o["count"],
|
||||
} for o in data
|
||||
]
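A short illustration of both branches, with an assumed aggregation row:

data = [{"name": "Chrome", "count": 10,
         "partition": [{"version": "124", "count": 7}, {"version": "123", "count": 3}]}]
__flatten_sort_key_count_version(data)
# -> [{"name": "Chrome", "count": 10}]
__flatten_sort_key_count_version(data, merge_nested=True)
# -> [{"name": "Chrome@124", "count": 7}, {"name": "Chrome@123", "count": 3}]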
|
||||
|
||||
|
||||
def __process_tags(row):
|
||||
return [
|
||||
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
||||
{"name": "browser.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
||||
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
||||
{"name": "OS.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
||||
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
||||
{"name": "device",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
||||
{"name": "country", "partitions": row.pop("country_partition")}
|
||||
]
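For reference, this is roughly how a row coming out of get_details() below is turned into tag groups (assumed, simplified partitions):

row = {"browsers_partition": [{"name": "Chrome", "count": 5,
                               "partition": [{"version": "124", "count": 5}]}],
       "os_partition": [], "device_partition": [], "country_partition": []}
__process_tags(row)
# -> [{"name": "browser", "partitions": [{"name": "Chrome", "count": 5}]},
#     {"name": "browser.ver", "partitions": [{"name": "Chrome@124", "count": 5}]},
#     {"name": "OS", "partitions": []}, {"name": "OS.ver", "partitions": []},
#     {"name": "device.family", "partitions": []}, {"name": "device", "partitions": []},
#     {"name": "country", "partitions": []}]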
|
||||
|
||||
|
||||
def get_details(project_id, error_id, user_id, **data):
|
||||
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||
step_size_name="step_size24")
|
||||
pg_sub_query24.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30",
|
||||
project_key="sessions.project_id")
|
||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30",
|
||||
project_key="errors.project_id")
|
||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err.append("source ='js_exception'")
|
||||
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||
step_size_name="step_size30")
|
||||
pg_sub_query30.append("error_id = %(error_id)s")
|
||||
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
|
||||
pg_basic_query.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
data["startDate24"] = TimeUTC.now(-1)
|
||||
data["endDate24"] = TimeUTC.now()
|
||||
data["startDate30"] = TimeUTC.now(-30)
|
||||
data["endDate30"] = TimeUTC.now()
|
||||
density24 = int(data.get("density24", 24))
|
||||
step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
||||
density30 = int(data.get("density30", 30))
|
||||
step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
||||
params = {
|
||||
"startDate24": data['startDate24'],
|
||||
"endDate24": data['endDate24'],
|
||||
"startDate30": data['startDate30'],
|
||||
"endDate30": data['endDate30'],
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size24": step_size24,
|
||||
"step_size30": step_size30,
|
||||
"error_id": error_id}
|
||||
|
||||
main_pg_query = f"""\
|
||||
SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
last_session_id,
|
||||
browsers_partition,
|
||||
os_partition,
|
||||
device_partition,
|
||||
country_partition,
|
||||
chart24,
|
||||
chart30
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions
|
||||
FROM public.errors
|
||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||
GROUP BY error_id, name, message) AS details
|
||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
||||
INNER JOIN (SELECT session_id AS last_session_id
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s
|
||||
ORDER BY errors.timestamp DESC
|
||||
LIMIT 1) AS last_session_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS version_details
|
||||
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_details
|
||||
GROUP BY count_per_os_details.name ) AS os_version_details
|
||||
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||
FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device
|
||||
ORDER BY count DESC) AS count_per_device_v_details
|
||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query24)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||
ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||
"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
|
||||
query = cur.mogrify(
|
||||
f"""SELECT error_id, status, session_id, start_ts,
|
||||
parent_error_id,session_id, user_anonymous_id,
|
||||
user_id, user_uuid, user_browser, user_browser_version,
|
||||
user_os, user_os_version, user_device, payload,
|
||||
FALSE AS favorite,
|
||||
True AS viewed
|
||||
FROM public.errors AS pe
|
||||
INNER JOIN events.errors AS ee USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE pe.project_id = %(project_id)s
|
||||
AND error_id = %(error_id)s
|
||||
ORDER BY start_ts DESC
|
||||
LIMIT 1;""",
|
||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
status = cur.fetchone()
|
||||
|
||||
if status is not None:
|
||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||
row["status"] = status.pop("status")
|
||||
row["parent_error_id"] = status.pop("parent_error_id")
|
||||
row["favorite"] = status.pop("favorite")
|
||||
row["viewed"] = status.pop("viewed")
|
||||
row["last_hydrated_session"] = status
|
||||
else:
|
||||
row["stack"] = []
|
||||
row["last_hydrated_session"] = None
|
||||
row["status"] = "untracked"
|
||||
row["parent_error_id"] = None
|
||||
row["favorite"] = False
|
||||
row["viewed"] = False
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
|
@ -1,294 +0,0 @@
|
|||
import json
|
||||
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.core.sessions import sessions_search
|
||||
from chalicelib.core.sourcemaps import sourcemaps
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
||||
|
||||
def get(error_id, family=False) -> dict | List[dict]:
|
||||
if family:
|
||||
return get_batch([error_id])
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id = %(error_id)s
|
||||
LIMIT 1;""",
|
||||
{"error_id": error_id})
|
||||
cur.execute(query=query)
|
||||
result = cur.fetchone()
|
||||
if result is not None:
|
||||
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
||||
def get_batch(error_ids):
|
||||
if len(error_ids) == 0:
|
||||
return []
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""
|
||||
WITH RECURSIVE error_family AS (
|
||||
SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id IN %(error_ids)s
|
||||
UNION
|
||||
SELECT child_errors.*
|
||||
FROM public.errors AS child_errors
|
||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
||||
)
|
||||
SELECT *
|
||||
FROM error_family;""",
|
||||
{"error_ids": tuple(error_ids)})
|
||||
cur.execute(query=query)
|
||||
errors = cur.fetchall()
|
||||
for e in errors:
|
||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
||||
|
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||
schemas.ErrorSort.USERS_COUNT: "users",
|
||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
||||
}.get(key, 'max_datetime')
|
||||
|
||||
|
||||
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
||||
empty_response = {
|
||||
'total': 0,
|
||||
'errors': []
|
||||
}
|
||||
|
||||
platform = None
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||
"pe.project_id=%(project_id)s"]
|
||||
# To ignore Script error
|
||||
pg_sub_query.append("pe.message!='Script error.'")
|
||||
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
|
||||
project_key=None)
|
||||
if platform:
|
||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||
statuses = []
|
||||
error_ids = None
|
||||
if data.startTimestamp is None:
|
||||
data.startTimestamp = TimeUTC.now(-30)
|
||||
if data.endTimestamp is None:
|
||||
data.endTimestamp = TimeUTC.now(1)
|
||||
if len(data.events) > 0 or len(data.filters) > 0:
|
||||
print("-- searching for sessions before errors")
|
||||
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
|
||||
error_status=data.status)
|
||||
if len(statuses) == 0:
|
||||
return empty_response
|
||||
error_ids = [e["errorId"] for e in statuses]
|
||||
with pg_client.PostgresClient() as cur:
|
||||
step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
||||
sort = __get_sort_key('datetime')
|
||||
if data.sort is not None:
|
||||
sort = __get_sort_key(data.sort)
|
||||
order = schemas.SortOrderType.DESC
|
||||
if data.order is not None:
|
||||
order = data.order
|
||||
extra_join = ""
|
||||
|
||||
params = {
|
||||
"startDate": data.startTimestamp,
|
||||
"endDate": data.endTimestamp,
|
||||
"project_id": project.project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size}
|
||||
if data.status != schemas.ErrorStatus.ALL:
|
||||
pg_sub_query.append("status = %(error_status)s")
|
||||
params["error_status"] = data.status
|
||||
if data.limit is not None and data.page is not None:
|
||||
params["errors_offset"] = (data.page - 1) * data.limit
|
||||
params["errors_limit"] = data.limit
|
||||
else:
|
||||
params["errors_offset"] = 0
|
||||
params["errors_limit"] = 200
|
||||
|
||||
if error_ids is not None:
|
||||
params["error_ids"] = tuple(error_ids)
|
||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||
# if data.bookmarked:
|
||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
|
||||
main_pg_query = f"""SELECT full_count,
|
||||
error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
chart
|
||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions,
|
||||
MAX(timestamp) AS max_datetime,
|
||||
MIN(timestamp) AS min_datetime
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS pe USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
{extra_join}
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}) AS details
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||
) AS details
|
||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
||||
|
||||
if total == 0:
|
||||
rows = []
|
||||
else:
|
||||
if len(statuses) == 0:
|
||||
query = cur.mogrify(
|
||||
"""SELECT error_id
|
||||
FROM public.errors
|
||||
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
||||
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
||||
"user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
statuses = helper.list_to_camel_case(cur.fetchall())
|
||||
statuses = {
|
||||
s["errorId"]: s for s in statuses
|
||||
}
|
||||
|
||||
for r in rows:
|
||||
r.pop("full_count")
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'errors': helper.list_to_camel_case(rows)
|
||||
}
|
||||
|
||||
|
||||
def __save_stacktrace(error_id, data):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.errors
|
||||
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
||||
WHERE error_id = %(error_id)s;""",
|
||||
{"error_id": error_id, "data": json.dumps(data)})
|
||||
cur.execute(query=query)
|
||||
|
||||
|
||||
def get_trace(project_id, error_id):
|
||||
error = get(error_id=error_id, family=False)
|
||||
if error is None:
|
||||
return {"errors": ["error not found"]}
|
||||
if error.get("source", "") != "js_exception":
|
||||
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
||||
if error.get("payload") is None:
|
||||
return {"errors": ["null payload"]}
|
||||
if error.get("stacktrace") is not None:
|
||||
return {"sourcemapUploaded": True,
|
||||
"trace": error.get("stacktrace"),
|
||||
"preparsed": True}
|
||||
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
||||
if all_exists:
|
||||
__save_stacktrace(error_id=error_id, data=trace)
|
||||
return {"sourcemapUploaded": all_exists,
|
||||
"trace": trace,
|
||||
"preparsed": False}
|
||||
|
||||
|
||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||
extra_constraints = ["s.project_id = %(project_id)s",
|
||||
"s.start_ts >= %(startDate)s",
|
||||
"s.start_ts <= %(endDate)s",
|
||||
"e.error_id = %(error_id)s"]
|
||||
if start_date is None:
|
||||
start_date = TimeUTC.now(-7)
|
||||
if end_date is None:
|
||||
end_date = TimeUTC.now()
|
||||
|
||||
params = {
|
||||
"startDate": start_date,
|
||||
"endDate": end_date,
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"error_id": error_id}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT s.project_id,
|
||||
s.session_id::text AS session_id,
|
||||
s.user_uuid,
|
||||
s.user_id,
|
||||
s.user_agent,
|
||||
s.user_os,
|
||||
s.user_browser,
|
||||
s.user_device,
|
||||
s.user_country,
|
||||
s.start_ts,
|
||||
s.duration,
|
||||
s.events_count,
|
||||
s.pages_count,
|
||||
s.errors_count,
|
||||
s.issue_types,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_favorite_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
||||
WHERE {" AND ".join(extra_constraints)}
|
||||
ORDER BY s.start_ts DESC;""",
|
||||
params)
|
||||
cur.execute(query=query)
|
||||
sessions_list = []
|
||||
total = cur.rowcount
|
||||
row = cur.fetchone()
|
||||
while row is not None and len(sessions_list) < 100:
|
||||
sessions_list.append(row)
|
||||
row = cur.fetchone()
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'sessions': helper.list_to_camel_case(sessions_list)
|
||||
}
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
import logging

from decouple import config

logger = logging.getLogger(__name__)

from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    import chalicelib.core.sessions.sessions_ch as sessions
else:
    import chalicelib.core.sessions.sessions_pg as sessions
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
from typing import Optional

import schemas
from chalicelib.core.sourcemaps import sourcemaps


def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
                            chart: bool = False, step_size_name: str = "step_size",
                            project_key: Optional[str] = "project_id"):
    if project_key is None:
        ch_sub_query = []
    else:
        ch_sub_query = [f"{project_key} =%(project_id)s"]
    if time_constraint:
        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
                         f"timestamp < %({endTime_arg_name})s"]
    if chart:
        ch_sub_query += [f"timestamp >= generated_timestamp",
                         f"timestamp < generated_timestamp + %({step_size_name})s"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
                               endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                               table_name=None):
    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
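The constraint builders above only return WHERE-clause fragments; callers join them with " AND ". For example (sketch):

__get_basic_constraints(platform=schemas.PlatformType.MOBILE, chart=True)
# -> ["project_id =%(project_id)s",
#     "timestamp >= %(startDate)s", "timestamp < %(endDate)s",
#     "timestamp >= generated_timestamp",
#     "timestamp < generated_timestamp + %(step_size)s",
#     "user_device_type = 'mobile'"]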
|
||||
48 api/chalicelib/core/errors_favorite.py Normal file
|
|
@ -0,0 +1,48 @@
|
|||
from chalicelib.utils import pg_client


def add_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
                            VALUES (%(userId)s,%(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": True}


def remove_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""DELETE FROM public.user_favorite_errors
                            WHERE user_id = %(userId)s
                              AND error_id = %(error_id)s;""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": False}


def favorite_error(project_id, user_id, error_id):
    exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
    if not exists:
        return {"errors": ["cannot bookmark non-rehydrated errors"]}
    if favorite:
        return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
    return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)


def error_exists_and_favorite(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT errors.error_id AS exists, ufe.error_id AS favorite
                   FROM public.errors
                   LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
                   WHERE error_id = %(error_id)s;""",
                {"userId": user_id, "error_id": error_id})
        )
        r = cur.fetchone()
        if r is None:
            return False, False
        return True, r.get("favorite") is not None
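favorite_error() acts as a toggle on top of the two helpers above; a sketch with assumed identifiers:

favorite_error(project_id=1, user_id=7, error_id="e1")  # -> {"errorId": "e1", "favorite": True}
favorite_error(project_id=1, user_id=7, error_id="e1")  # -> {"errorId": "e1", "favorite": False}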
|
||||
37 api/chalicelib/core/errors_viewed.py Normal file
|
|
@ -0,0 +1,37 @@
|
|||
from chalicelib.utils import pg_client


def add_viewed_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
                           VALUES (%(userId)s,%(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )


def viewed_error_exists(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT errors.error_id AS hydrated,
                      COALESCE((SELECT TRUE
                                FROM public.user_viewed_errors AS ve
                                WHERE ve.error_id = %(error_id)s
                                  AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
               FROM public.errors
               WHERE error_id = %(error_id)s""",
            {"userId": user_id, "error_id": error_id})
        cur.execute(query=query)
        r = cur.fetchone()
        if r:
            return r.get("viewed")
        return True


def viewed_error(project_id, user_id, error_id):
    if viewed_error_exists(user_id=user_id, error_id=error_id):
        return None
    return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
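Note the deliberate asymmetry: viewed_error_exists() returns True when the error is not in public.errors at all, so viewed_error() never inserts a view for an unknown error and records at most one view per user and error. A sketch with assumed identifiers:

viewed_error(project_id=1, user_id=7, error_id="e1")  # first call inserts a row
viewed_error(project_id=1, user_id=7, error_id="e1")  # subsequent calls return None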
|
||||
|
|
@ -1,10 +1,9 @@
|
|||
from functools import cache
|
||||
from typing import Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import autocomplete
|
||||
from chalicelib.core import issues
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.core.sessions import sessions_metas
|
||||
from chalicelib.core import sessions_metas
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
|
||||
|
|
@ -56,7 +55,7 @@ def __get_grouped_clickrage(rows, session_id, project_id):
|
|||
def get_by_session_id(session_id, project_id, group_clickrage=False, event_type: Optional[schemas.EventType] = None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
rows = []
|
||||
if event_type is None or event_type == schemas.EventType.CLICK:
|
||||
if event_type is None or event_type == schemas.EventType.click:
|
||||
cur.execute(cur.mogrify("""\
|
||||
SELECT
|
||||
c.*,
|
||||
|
|
@ -70,7 +69,7 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
rows += cur.fetchall()
|
||||
if group_clickrage:
|
||||
rows = __get_grouped_clickrage(rows=rows, session_id=session_id, project_id=project_id)
|
||||
if event_type is None or event_type == schemas.EventType.INPUT:
|
||||
if event_type is None or event_type == schemas.EventType.input:
|
||||
cur.execute(cur.mogrify("""
|
||||
SELECT
|
||||
i.*,
|
||||
|
|
@ -82,7 +81,7 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
if event_type is None or event_type == schemas.EventType.LOCATION:
|
||||
if event_type is None or event_type == schemas.EventType.location:
|
||||
cur.execute(cur.mogrify("""\
|
||||
SELECT
|
||||
l.*,
|
||||
|
|
@ -99,96 +98,69 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
return rows
|
||||
|
||||
|
||||
def _search_tags(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = f"""
|
||||
SELECT public.tags.name,
|
||||
'TAG' AS type
|
||||
FROM public.tags
|
||||
WHERE public.tags.project_id = %(project_id)s
|
||||
ORDER BY SIMILARITY(public.tags.name, %(value)s) DESC
|
||||
LIMIT 10
|
||||
"""
|
||||
query = cur.mogrify(query, {'project_id': project_id, 'value': value})
|
||||
cur.execute(query)
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
class EventType:
|
||||
CLICK = Event(ui_type=schemas.EventType.CLICK, table="events.clicks", column="label")
|
||||
INPUT = Event(ui_type=schemas.EventType.INPUT, table="events.inputs", column="label")
|
||||
LOCATION = Event(ui_type=schemas.EventType.LOCATION, table="events.pages", column="path")
|
||||
CUSTOM = Event(ui_type=schemas.EventType.CUSTOM, table="events_common.customs", column="name")
|
||||
REQUEST = Event(ui_type=schemas.EventType.REQUEST, table="events_common.requests", column="path")
|
||||
GRAPHQL = Event(ui_type=schemas.EventType.GRAPHQL, table="events.graphql", column="name")
|
||||
STATEACTION = Event(ui_type=schemas.EventType.STATE_ACTION, table="events.state_actions", column="name")
|
||||
TAG = Event(ui_type=schemas.EventType.TAG, table="events.tags", column="tag_id")
|
||||
ERROR = Event(ui_type=schemas.EventType.ERROR, table="events.errors",
|
||||
CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label")
|
||||
INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label")
|
||||
LOCATION = Event(ui_type=schemas.EventType.location, table="events.pages", column="path")
|
||||
CUSTOM = Event(ui_type=schemas.EventType.custom, table="events_common.customs", column="name")
|
||||
REQUEST = Event(ui_type=schemas.EventType.request, table="events_common.requests", column="path")
|
||||
GRAPHQL = Event(ui_type=schemas.EventType.graphql, table="events.graphql", column="name")
|
||||
STATEACTION = Event(ui_type=schemas.EventType.state_action, table="events.state_actions", column="name")
|
||||
ERROR = Event(ui_type=schemas.EventType.error, table="events.errors",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
METADATA = Event(ui_type=schemas.FilterType.METADATA, table="public.sessions", column=None)
|
||||
# MOBILE
|
||||
CLICK_MOBILE = Event(ui_type=schemas.EventType.CLICK_MOBILE, table="events_ios.taps", column="label")
|
||||
INPUT_MOBILE = Event(ui_type=schemas.EventType.INPUT_MOBILE, table="events_ios.inputs", column="label")
|
||||
VIEW_MOBILE = Event(ui_type=schemas.EventType.VIEW_MOBILE, table="events_ios.views", column="name")
|
||||
SWIPE_MOBILE = Event(ui_type=schemas.EventType.SWIPE_MOBILE, table="events_ios.swipes", column="label")
|
||||
CUSTOM_MOBILE = Event(ui_type=schemas.EventType.CUSTOM_MOBILE, table="events_common.customs", column="name")
|
||||
REQUEST_MOBILE = Event(ui_type=schemas.EventType.REQUEST_MOBILE, table="events_common.requests", column="path")
|
||||
CRASH_MOBILE = Event(ui_type=schemas.EventType.ERROR_MOBILE, table="events_common.crashes",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
METADATA = Event(ui_type=schemas.FilterType.metadata, table="public.sessions", column=None)
|
||||
# IOS
|
||||
CLICK_IOS = Event(ui_type=schemas.EventType.click_ios, table="events_ios.clicks", column="label")
|
||||
INPUT_IOS = Event(ui_type=schemas.EventType.input_ios, table="events_ios.inputs", column="label")
|
||||
VIEW_IOS = Event(ui_type=schemas.EventType.view_ios, table="events_ios.views", column="name")
|
||||
CUSTOM_IOS = Event(ui_type=schemas.EventType.custom_ios, table="events_common.customs", column="name")
|
||||
REQUEST_IOS = Event(ui_type=schemas.EventType.request_ios, table="events_common.requests", column="url")
|
||||
ERROR_IOS = Event(ui_type=schemas.EventType.error_ios, table="events_ios.crashes",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
|
||||
|
||||
@cache
|
||||
def supported_types():
|
||||
return {
|
||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
SUPPORTED_TYPES = {
    EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
                                             query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
    EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
                                             query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
    EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
                                                query=autocomplete.__generic_query(typename=EventType.LOCATION.ui_type)),
    EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
                                              query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
    EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
                                               query=autocomplete.__generic_query(typename=EventType.REQUEST.ui_type)),
    EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
                                               query=autocomplete.__generic_query(typename=EventType.GRAPHQL.ui_type)),
    EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
                                                   query=autocomplete.__generic_query(typename=EventType.STATEACTION.ui_type)),
    EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
    EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors, query=None),
    EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata, query=None),
    # IOS
    EventType.CLICK_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_IOS),
                                                 query=autocomplete.__generic_query(typename=EventType.CLICK_IOS.ui_type)),
    EventType.INPUT_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_IOS),
                                                 query=autocomplete.__generic_query(typename=EventType.INPUT_IOS.ui_type)),
    EventType.VIEW_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_IOS),
                                                query=autocomplete.__generic_query(typename=EventType.VIEW_IOS.ui_type)),
    EventType.CUSTOM_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_IOS),
                                                  query=autocomplete.__generic_query(typename=EventType.CUSTOM_IOS.ui_type)),
    EventType.REQUEST_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_IOS),
                                                   query=autocomplete.__generic_query(typename=EventType.REQUEST_IOS.ui_type)),
    EventType.ERROR_IOS.ui_type: SupportedFilter(get=autocomplete.__search_errors_ios, query=None),
    # MOBILE
    EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
                                                    query=autocomplete.__generic_query(typename=EventType.CLICK_MOBILE.ui_type)),
    EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
                                                    query=autocomplete.__generic_query(typename=EventType.SWIPE_MOBILE.ui_type)),
    EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
                                                    query=autocomplete.__generic_query(typename=EventType.INPUT_MOBILE.ui_type)),
    EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
                                                   query=autocomplete.__generic_query(typename=EventType.VIEW_MOBILE.ui_type)),
    EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
                                                     query=autocomplete.__generic_query(typename=EventType.CUSTOM_MOBILE.ui_type)),
    EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
                                                      query=autocomplete.__generic_query(typename=EventType.REQUEST_MOBILE.ui_type)),
    EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile, query=None),
}


def get_errors_by_session_id(session_id, project_id):

@ -208,17 +180,17 @@ def search(text, event_type, project_id, source, key):
    if not event_type:
        return {"data": autocomplete.__get_autocomplete_table(text, project_id)}

    if event_type in SUPPORTED_TYPES.keys():
        rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
        # for IOS events autocomplete
        # if event_type + "_IOS" in SUPPORTED_TYPES.keys():
        #     rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type + "_IOS" in SUPPORTED_TYPES.keys():
        rows = SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
        rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
        return sessions_metas.search(text, event_type, project_id)
    elif event_type.endswith("_IOS") \
            and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
        return sessions_metas.search(text, event_type, project_id)
    elif event_type.endswith("_MOBILE") \
            and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
        return sessions_metas.search(text, event_type, project_id)
    else:
        return {"errors": ["unsupported event"]}

69
api/chalicelib/core/events_ios.py
Normal file

@ -0,0 +1,69 @@
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core import events
|
||||
|
||||
|
||||
def get_customs_by_sessionId(session_id, project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""\
|
||||
SELECT
|
||||
c.*,
|
||||
'{events.EventType.CUSTOM_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.CUSTOM_IOS.table} AS c
|
||||
WHERE
|
||||
c.session_id = %(session_id)s
|
||||
ORDER BY c.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
return helper.dict_to_camel_case(rows)
|
||||
|
||||
|
||||
def get_by_sessionId(session_id, project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
c.*,
|
||||
'{events.EventType.CLICK_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.CLICK_IOS.table} AS c
|
||||
WHERE
|
||||
c.session_id = %(session_id)s
|
||||
ORDER BY c.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
i.*,
|
||||
'{events.EventType.INPUT_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.INPUT_IOS.table} AS i
|
||||
WHERE
|
||||
i.session_id = %(session_id)s
|
||||
ORDER BY i.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
v.*,
|
||||
'{events.EventType.VIEW_IOS.ui_type}' AS type
|
||||
FROM {events.EventType.VIEW_IOS.table} AS v
|
||||
WHERE
|
||||
v.session_id = %(session_id)s
|
||||
ORDER BY v.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
rows = sorted(rows, key=lambda k: k["timestamp"])
|
||||
return rows
|
||||
|
||||
|
||||
def get_crashes_by_session_id(session_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT cr.*,uc.*, cr.timestamp - s.start_ts AS time
|
||||
FROM {events.EventType.ERROR_IOS.table} AS cr INNER JOIN public.crashes_ios AS uc USING (crash_id) INNER JOIN public.sessions AS s USING (session_id)
|
||||
WHERE
|
||||
cr.session_id = %(session_id)s
|
||||
ORDER BY timestamp;""", {"session_id": session_id}))
|
||||
errors = cur.fetchall()
|
||||
return helper.list_to_camel_case(errors)

@ -1,68 +0,0 @@
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core import events
|
||||
|
||||
|
||||
def get_customs_by_session_id(session_id, project_id):
|
||||
return events.get_customs_by_session_id(session_id=session_id, project_id=project_id)
|
||||
|
||||
|
||||
def get_by_sessionId(session_id, project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
c.*,
|
||||
'TAP' AS type
|
||||
FROM events_ios.taps AS c
|
||||
WHERE
|
||||
c.session_id = %(session_id)s
|
||||
ORDER BY c.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
i.*,
|
||||
'INPUT' AS type
|
||||
FROM events_ios.inputs AS i
|
||||
WHERE
|
||||
i.session_id = %(session_id)s
|
||||
ORDER BY i.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
v.*,
|
||||
'VIEW' AS type
|
||||
FROM events_ios.views AS v
|
||||
WHERE
|
||||
v.session_id = %(session_id)s
|
||||
ORDER BY v.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
s.*,
|
||||
'SWIPE' AS type
|
||||
FROM events_ios.swipes AS s
|
||||
WHERE
|
||||
s.session_id = %(session_id)s
|
||||
ORDER BY s.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
rows = sorted(rows, key=lambda k: k["timestamp"])
|
||||
return rows
|
||||
|
||||
|
||||
def get_crashes_by_session_id(session_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT cr.*,uc.*, cr.timestamp - s.start_ts AS time
|
||||
FROM {events.EventType.CRASH_MOBILE.table} AS cr
|
||||
INNER JOIN public.crashes_ios AS uc USING (crash_ios_id)
|
||||
INNER JOIN public.sessions AS s USING (session_id)
|
||||
WHERE
|
||||
cr.session_id = %(session_id)s
|
||||
ORDER BY timestamp;""", {"session_id": session_id}))
|
||||
errors = cur.fetchall()
|
||||
return helper.list_to_camel_case(errors)

@ -1,598 +0,0 @@
import json
|
||||
import logging
|
||||
from typing import Any, List, Dict, Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from fastapi import HTTPException, status
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
feature_flag_columns = (
|
||||
"feature_flag_id",
|
||||
"payload",
|
||||
"flag_key",
|
||||
"description",
|
||||
"flag_type",
|
||||
"is_persist",
|
||||
"is_active",
|
||||
"created_at",
|
||||
"updated_at",
|
||||
"created_by",
|
||||
"updated_by",
|
||||
)
|
||||
|
||||
|
||||
def exists_by_name(flag_key: str, project_id: int, exclude_id: Optional[int]) -> bool:
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
|
||||
FROM public.feature_flags
|
||||
WHERE deleted_at IS NULL
|
||||
AND flag_key ILIKE %(flag_key)s AND project_id=%(project_id)s
|
||||
{"AND feature_flag_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
|
||||
{"flag_key": flag_key, "exclude_id": exclude_id, "project_id": project_id})
|
||||
|
||||
cur.execute(query=query)
|
||||
row = cur.fetchone()
|
||||
return row["exists"]
|
||||
|
||||
|
||||
def update_feature_flag_status(project_id: int, feature_flag_id: int, is_active: bool) -> Dict[str, Any]:
|
||||
try:
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""UPDATE feature_flags
|
||||
SET is_active = %(is_active)s, updated_at=NOW()
|
||||
WHERE feature_flag_id=%(feature_flag_id)s AND project_id=%(project_id)s
|
||||
RETURNING is_active;""",
|
||||
{"feature_flag_id": feature_flag_id, "is_active": is_active, "project_id": project_id})
|
||||
cur.execute(query=query)
|
||||
|
||||
return {"is_active": cur.fetchone()["is_active"]}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update feature flag status: {e}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Failed to update feature flag status")
|
||||
|
||||
|
||||
def search_feature_flags(project_id: int, user_id: int, data: schemas.SearchFlagsSchema) -> Dict[str, Any]:
|
||||
"""
|
||||
Get all feature flags and their total count.
|
||||
"""
|
||||
constraints, params = prepare_constraints_params_to_search(data, project_id, user_id)
|
||||
|
||||
sql = f"""
|
||||
SELECT COUNT(1) OVER () AS count, {", ".join(feature_flag_columns)}
|
||||
FROM feature_flags
|
||||
WHERE {" AND ".join(constraints)}
|
||||
ORDER BY updated_at {data.order}
|
||||
LIMIT %(limit)s OFFSET %(offset)s;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
||||
if len(rows) == 0:
|
||||
return {"data": {"total": 0, "list": []}}
|
||||
|
||||
results = {"total": rows[0]["count"]}
|
||||
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
for row in rows:
|
||||
row.pop("count")
|
||||
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
|
||||
row["updatedAt"] = TimeUTC.datetime_to_timestamp(row["updatedAt"])
|
||||
|
||||
results["list"] = rows
|
||||
return {"data": results}
|
||||
|
||||
|
||||
def prepare_constraints_params_to_search(data, project_id, user_id):
|
||||
constraints = [
|
||||
"feature_flags.project_id = %(project_id)s",
|
||||
"feature_flags.deleted_at IS NULL",
|
||||
]
|
||||
params = {
|
||||
"project_id": project_id,
|
||||
"user_id": user_id,
|
||||
"limit": data.limit,
|
||||
"offset": (data.page - 1) * data.limit,
|
||||
}
|
||||
if data.is_active is not None:
|
||||
constraints.append("feature_flags.is_active=%(is_active)s")
|
||||
params["is_active"] = data.is_active
|
||||
if data.user_id is not None:
|
||||
constraints.append("feature_flags.created_by=%(user_id)s")
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
constraints.append("flag_key ILIKE %(query)s")
|
||||
params["query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
return constraints, params
|
||||
|
||||
|
||||
def create_feature_flag(project_id: int, user_id: int, feature_flag_data: schemas.FeatureFlagSchema) -> Optional[int]:
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.MULTI_VARIANT and len(feature_flag_data.variants) == 0:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Variants are required for multi variant flag")
|
||||
|
||||
validate_unique_flag_key(feature_flag_data, project_id)
|
||||
validate_multi_variant_flag(feature_flag_data)
|
||||
|
||||
insert_columns = (
|
||||
'project_id',
|
||||
'flag_key',
|
||||
'description',
|
||||
'flag_type',
|
||||
'payload',
|
||||
'is_persist',
|
||||
'is_active',
|
||||
'created_by'
|
||||
)
|
||||
|
||||
params = prepare_params_to_create_flag(feature_flag_data, project_id, user_id)
|
||||
conditions_len = len(feature_flag_data.conditions)
|
||||
variants_len = len(feature_flag_data.variants)
|
||||
|
||||
flag_sql = f"""
|
||||
INSERT INTO feature_flags ({", ".join(insert_columns)})
|
||||
VALUES ({", ".join(["%(" + col + ")s" for col in insert_columns])})
|
||||
RETURNING feature_flag_id
|
||||
"""
|
||||
conditions_query = ""
|
||||
variants_query = ""
|
||||
|
||||
if conditions_len > 0:
|
||||
conditions_query = f"""
|
||||
inserted_conditions AS (
|
||||
INSERT INTO feature_flags_conditions(feature_flag_id, name, rollout_percentage, filters)
|
||||
VALUES {",".join([f"(("
|
||||
f"SELECT feature_flag_id FROM inserted_flag),"
|
||||
f"%(name_{i})s,"
|
||||
f"%(rollout_percentage_{i})s,"
|
||||
f"%(filters_{i})s::jsonb)"
|
||||
for i in range(conditions_len)])}
|
||||
RETURNING feature_flag_id
|
||||
)
|
||||
"""
|
||||
|
||||
if variants_len > 0:
|
||||
variants_query = f""",
|
||||
inserted_variants AS (
|
||||
INSERT INTO feature_flags_variants(feature_flag_id, value, description, rollout_percentage, payload)
|
||||
VALUES {",".join([f"((SELECT feature_flag_id FROM inserted_flag),"
|
||||
f"%(v_value_{i})s,"
|
||||
f"%(v_description_{i})s,"
|
||||
f"%(v_rollout_percentage_{i})s,"
|
||||
f"%(v_payload_{i})s::jsonb)"
|
||||
for i in range(variants_len)])}
|
||||
RETURNING feature_flag_id
|
||||
)
|
||||
"""
|
||||
|
||||
query = f"""
|
||||
WITH inserted_flag AS ({flag_sql}),
|
||||
{conditions_query}
|
||||
{variants_query}
|
||||
SELECT feature_flag_id FROM inserted_flag;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(query, params)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
|
||||
if row is None:
|
||||
return None
|
||||
|
||||
return get_feature_flag(project_id=project_id, feature_flag_id=row["feature_flag_id"])
|
||||
|
||||
|
||||
def validate_unique_flag_key(feature_flag_data, project_id, exclude_id=None):
|
||||
if exists_by_name(project_id=project_id, flag_key=feature_flag_data.flag_key, exclude_id=exclude_id):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Feature flag with key already exists.")
|
||||
|
||||
|
||||
def validate_multi_variant_flag(feature_flag_data):
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.MULTI_VARIANT:
|
||||
if sum([v.rollout_percentage for v in feature_flag_data.variants]) > 100:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Sum of rollout percentage for variants cannot be greater than 100.")
|
||||
|
||||
|
||||
def prepare_params_to_create_flag(feature_flag_data, project_id, user_id):
|
||||
conditions_data = prepare_conditions_values(feature_flag_data)
|
||||
variants_data = prepare_variants_values(feature_flag_data)
|
||||
|
||||
params = {
|
||||
"project_id": project_id,
|
||||
"created_by": user_id,
|
||||
**feature_flag_data.model_dump(),
|
||||
**conditions_data,
|
||||
**variants_data,
|
||||
"payload": json.dumps(feature_flag_data.payload)
|
||||
}
|
||||
|
||||
return params
|
||||
|
||||
|
||||
def prepare_variants_values(feature_flag_data):
|
||||
variants_data = {}
|
||||
for i, v in enumerate(feature_flag_data.variants):
|
||||
for k in v.model_dump().keys():
|
||||
variants_data[f"v_{k}_{i}"] = v.__getattribute__(k)
|
||||
variants_data[f"v_value_{i}"] = v.value
|
||||
variants_data[f"v_description_{i}"] = v.description
|
||||
variants_data[f"v_payload_{i}"] = json.dumps(v.payload)
|
||||
variants_data[f"v_rollout_percentage_{i}"] = v.rollout_percentage
|
||||
return variants_data
|
||||
|
||||
|
||||
def prepare_conditions_values(feature_flag_data):
|
||||
conditions_data = {}
|
||||
for i, s in enumerate(feature_flag_data.conditions):
|
||||
for k in s.model_dump().keys():
|
||||
conditions_data[f"{k}_{i}"] = s.__getattribute__(k)
|
||||
conditions_data[f"name_{i}"] = s.name
|
||||
conditions_data[f"rollout_percentage_{i}"] = s.rollout_percentage
|
||||
conditions_data[f"filters_{i}"] = json.dumps([filter_.model_dump() for filter_ in s.filters])
|
||||
return conditions_data
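    # Illustrative output (hypothetical condition, not part of the change): a single condition
    # named "beta users" at 50% rollout yields, among the model_dump keys, the positional
    # parameters consumed by the %(name_0)s / %(rollout_percentage_0)s / %(filters_0)s
    # placeholders built in create_feature_flag:
    #   {"name_0": "beta users", "rollout_percentage_0": 50, "filters_0": "[...]"}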
|
||||
|
||||
|
||||
def get_feature_flag(project_id: int, feature_flag_id: int) -> Optional[Dict[str, Any]]:
|
||||
conditions_query = """
|
||||
SELECT COALESCE(jsonb_agg(ffc ORDER BY condition_id), '[]'::jsonb) AS conditions
|
||||
FROM feature_flags_conditions AS ffc
|
||||
WHERE ffc.feature_flag_id = %(feature_flag_id)s
|
||||
"""
|
||||
|
||||
variants_query = """
|
||||
SELECT COALESCE(jsonb_agg(ffv ORDER BY variant_id), '[]'::jsonb) AS variants
|
||||
FROM feature_flags_variants AS ffv
|
||||
WHERE ffv.feature_flag_id = %(feature_flag_id)s
|
||||
"""
|
||||
|
||||
sql = f"""
|
||||
SELECT {", ".join(["ff." + col for col in feature_flag_columns])},
|
||||
({conditions_query}) AS conditions,
|
||||
({variants_query}) AS variants
|
||||
FROM feature_flags AS ff
|
||||
WHERE ff.feature_flag_id = %(feature_flag_id)s
|
||||
AND ff.project_id = %(project_id)s
|
||||
AND ff.deleted_at IS NULL;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, {"feature_flag_id": feature_flag_id, "project_id": project_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
|
||||
if row is None:
|
||||
return {"errors": ["Feature flag not found"]}
|
||||
|
||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||
row["updated_at"] = TimeUTC.datetime_to_timestamp(row["updated_at"])
|
||||
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
||||
|
||||
def create_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlagCondition]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Create new feature flag conditions and return their data.
|
||||
"""
|
||||
rows = []
|
||||
|
||||
# insert all conditions rows with single sql query
|
||||
if len(conditions) > 0:
|
||||
columns = (
|
||||
"feature_flag_id",
|
||||
"name",
|
||||
"rollout_percentage",
|
||||
"filters",
|
||||
)
|
||||
|
||||
sql = f"""
|
||||
INSERT INTO feature_flags_conditions
|
||||
(feature_flag_id, name, rollout_percentage, filters)
|
||||
VALUES {", ".join(["%s"] * len(conditions))}
|
||||
RETURNING condition_id, {", ".join(columns)}
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = [
|
||||
(feature_flag_id, c.name, c.rollout_percentage,
|
||||
json.dumps([filter_.model_dump() for filter_ in c.filters]))
|
||||
for c in conditions]
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
||||
return rows
|
||||
|
||||
|
||||
def update_feature_flag(project_id: int, feature_flag_id: int,
|
||||
feature_flag: schemas.FeatureFlagSchema, user_id: int):
|
||||
"""
|
||||
Update an existing feature flag and return its updated data.
|
||||
"""
|
||||
validate_unique_flag_key(feature_flag_data=feature_flag, project_id=project_id, exclude_id=feature_flag_id)
|
||||
validate_multi_variant_flag(feature_flag_data=feature_flag)
|
||||
|
||||
columns = (
|
||||
"flag_key",
|
||||
"description",
|
||||
"flag_type",
|
||||
"is_persist",
|
||||
"is_active",
|
||||
"payload",
|
||||
"updated_by",
|
||||
)
|
||||
|
||||
params = {
|
||||
"updated_by": user_id,
|
||||
"feature_flag_id": feature_flag_id,
|
||||
"project_id": project_id,
|
||||
**feature_flag.model_dump(),
|
||||
"payload": json.dumps(feature_flag.payload),
|
||||
}
|
||||
|
||||
sql = f"""
|
||||
UPDATE feature_flags
|
||||
SET {", ".join(f"{column} = %({column})s" for column in columns)},
|
||||
updated_at = timezone('utc'::text, now())
|
||||
WHERE feature_flag_id = %(feature_flag_id)s AND project_id = %(project_id)s
|
||||
RETURNING feature_flag_id, {", ".join(columns)}, created_at, updated_at
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
|
||||
if row is None:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Feature flag not found")
|
||||
|
||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||
row["updated_at"] = TimeUTC.datetime_to_timestamp(row["updated_at"])
|
||||
row['conditions'] = check_conditions(feature_flag_id, feature_flag.conditions)
|
||||
row['variants'] = check_variants(feature_flag_id, feature_flag.variants)
|
||||
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
||||
|
||||
def get_conditions(feature_flag_id: int):
|
||||
"""
|
||||
Get all conditions for a feature flag.
|
||||
"""
|
||||
sql = """
|
||||
SELECT
|
||||
condition_id,
|
||||
feature_flag_id,
|
||||
name,
|
||||
rollout_percentage,
|
||||
filters
|
||||
FROM feature_flags_conditions
|
||||
WHERE feature_flag_id = %(feature_flag_id)s
|
||||
ORDER BY condition_id;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, {"feature_flag_id": feature_flag_id})
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
||||
return rows
|
||||
|
||||
|
||||
def check_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVariant]) -> Any:
|
||||
existing_ids = [ev.get("variant_id") for ev in get_variants(feature_flag_id)]
|
||||
to_be_deleted = []
|
||||
to_be_updated = []
|
||||
to_be_created = []
|
||||
|
||||
for vid in existing_ids:
|
||||
if vid not in [v.variant_id for v in variants]:
|
||||
to_be_deleted.append(vid)
|
||||
|
||||
for variant in variants:
|
||||
if variant.variant_id is None:
|
||||
to_be_created.append(variant)
|
||||
else:
|
||||
to_be_updated.append(variant)
|
||||
|
||||
if len(to_be_created) > 0:
|
||||
create_variants(feature_flag_id=feature_flag_id, variants=to_be_created)
|
||||
|
||||
if len(to_be_updated) > 0:
|
||||
update_variants(feature_flag_id=feature_flag_id, variants=to_be_updated)
|
||||
|
||||
if len(to_be_deleted) > 0:
|
||||
delete_variants(feature_flag_id=feature_flag_id, ids=to_be_deleted)
|
||||
|
||||
return get_variants(feature_flag_id)
|
||||
|
||||
|
||||
def get_variants(feature_flag_id: int):
|
||||
sql = """
|
||||
SELECT
|
||||
variant_id,
|
||||
feature_flag_id,
|
||||
value,
|
||||
payload,
|
||||
rollout_percentage
|
||||
FROM feature_flags_variants
|
||||
WHERE feature_flag_id = %(feature_flag_id)s
|
||||
ORDER BY variant_id;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, {"feature_flag_id": feature_flag_id})
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
||||
return rows
|
||||
|
||||
|
||||
def create_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVariant]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Create new feature flag variants and return their data.
|
||||
"""
|
||||
rows = []
|
||||
|
||||
# insert all variants rows with single sql query
|
||||
if len(variants) > 0:
|
||||
columns = (
|
||||
"feature_flag_id",
|
||||
"value",
|
||||
"description",
|
||||
"payload",
|
||||
"rollout_percentage",
|
||||
)
|
||||
|
||||
sql = f"""
|
||||
INSERT INTO feature_flags_variants
|
||||
(feature_flag_id, value, description, payload, rollout_percentage)
|
||||
VALUES {", ".join(["%s"] * len(variants))}
|
||||
RETURNING variant_id, {", ".join(columns)}
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in
|
||||
variants]
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
||||
return rows
|
||||
|
||||
|
||||
def update_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVariant]) -> Any:
|
||||
"""
|
||||
Update existing feature flag variants and return their updated data.
|
||||
"""
|
||||
values = []
|
||||
params = {
|
||||
"feature_flag_id": feature_flag_id,
|
||||
}
|
||||
for i in range(len(variants)):
|
||||
values.append(f"(%(variant_id_{i})s, %(value_{i})s, %(rollout_percentage_{i})s, %(payload_{i})s::jsonb)")
|
||||
params[f"variant_id_{i}"] = variants[i].variant_id
|
||||
params[f"value_{i}"] = variants[i].value
|
||||
params[f"rollout_percentage_{i}"] = variants[i].rollout_percentage
|
||||
params[f"payload_{i}"] = json.dumps(variants[i].payload)
|
||||
|
||||
sql = f"""
|
||||
UPDATE feature_flags_variants
|
||||
SET value = c.value, rollout_percentage = c.rollout_percentage, payload = c.payload
|
||||
FROM (VALUES {','.join(values)}) AS c(variant_id, value, rollout_percentage, payload)
|
||||
WHERE c.variant_id = feature_flags_variants.variant_id AND feature_flag_id = %(feature_flag_id)s;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
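    # Sketch of the generated statement for two variants (hypothetical positions, shown before
    # mogrify substitutes the parameters):
    #   UPDATE feature_flags_variants
    #   SET value = c.value, rollout_percentage = c.rollout_percentage, payload = c.payload
    #   FROM (VALUES (%(variant_id_0)s, %(value_0)s, %(rollout_percentage_0)s, %(payload_0)s::jsonb),
    #                (%(variant_id_1)s, %(value_1)s, %(rollout_percentage_1)s, %(payload_1)s::jsonb))
    #        AS c(variant_id, value, rollout_percentage, payload)
    #   WHERE c.variant_id = feature_flags_variants.variant_id AND feature_flag_id = %(feature_flag_id)s;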
|
||||
|
||||
|
||||
def delete_variants(feature_flag_id: int, ids: List[int]) -> None:
|
||||
"""
|
||||
Delete existing feature flag variants and return their data.
|
||||
"""
|
||||
sql = """
|
||||
DELETE FROM feature_flags_variants
|
||||
WHERE variant_id IN %(ids)s
|
||||
AND feature_flag_id= %(feature_flag_id)s;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, {"feature_flag_id": feature_flag_id, "ids": tuple(ids)})
|
||||
cur.execute(query)
|
||||
|
||||
|
||||
def check_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlagCondition]) -> Any:
|
||||
existing_ids = [ec.get("condition_id") for ec in get_conditions(feature_flag_id)]
|
||||
to_be_deleted = []
|
||||
to_be_updated = []
|
||||
to_be_created = []
|
||||
|
||||
for cid in existing_ids:
|
||||
if cid not in [c.condition_id for c in conditions]:
|
||||
to_be_deleted.append(cid)
|
||||
|
||||
for condition in conditions:
|
||||
if condition.condition_id is None:
|
||||
to_be_created.append(condition)
|
||||
else:
|
||||
to_be_updated.append(condition)
|
||||
|
||||
if len(to_be_created) > 0:
|
||||
create_conditions(feature_flag_id=feature_flag_id, conditions=to_be_created)
|
||||
|
||||
if len(to_be_updated) > 0:
|
||||
update_conditions(feature_flag_id=feature_flag_id, conditions=to_be_updated)
|
||||
|
||||
if len(to_be_deleted) > 0:
|
||||
delete_conditions(feature_flag_id=feature_flag_id, ids=to_be_deleted)
|
||||
|
||||
return get_conditions(feature_flag_id)
|
||||
|
||||
|
||||
def update_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlagCondition]) -> Any:
|
||||
"""
|
||||
Update existing feature flag conditions and return their updated data.
|
||||
"""
|
||||
values = []
|
||||
params = {
|
||||
"feature_flag_id": feature_flag_id,
|
||||
}
|
||||
for i in range(len(conditions)):
|
||||
values.append(f"(%(condition_id_{i})s, %(name_{i})s, %(rollout_percentage_{i})s, %(filters_{i})s::jsonb)")
|
||||
params[f"condition_id_{i}"] = conditions[i].condition_id
|
||||
params[f"name_{i}"] = conditions[i].name
|
||||
params[f"rollout_percentage_{i}"] = conditions[i].rollout_percentage
|
||||
params[f"filters_{i}"] = json.dumps(conditions[i].filters)
|
||||
|
||||
sql = f"""
|
||||
UPDATE feature_flags_conditions
|
||||
SET name = c.name, rollout_percentage = c.rollout_percentage, filters = c.filters
|
||||
FROM (VALUES {','.join(values)}) AS c(condition_id, name, rollout_percentage, filters)
|
||||
WHERE c.condition_id = feature_flags_conditions.condition_id AND feature_flag_id = %(feature_flag_id)s;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
|
||||
|
||||
def delete_conditions(feature_flag_id: int, ids: List[int]) -> None:
|
||||
"""
|
||||
Delete feature flag conditions.
|
||||
"""
|
||||
sql = """
|
||||
DELETE FROM feature_flags_conditions
|
||||
WHERE condition_id IN %(ids)s
|
||||
AND feature_flag_id= %(feature_flag_id)s;
|
||||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(sql, {"feature_flag_id": feature_flag_id, "ids": tuple(ids)})
|
||||
cur.execute(query)
|
||||
|
||||
|
||||
def delete_feature_flag(project_id: int, feature_flag_id: int):
|
||||
"""
|
||||
Delete a feature flag.
|
||||
"""
|
||||
conditions = [
|
||||
"project_id=%(project_id)s",
|
||||
"feature_flags.feature_flag_id=%(feature_flag_id)s"
|
||||
]
|
||||
params = {"project_id": project_id, "feature_flag_id": feature_flag_id}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""UPDATE feature_flags
|
||||
SET deleted_at= (now() at time zone 'utc'), is_active=false
|
||||
WHERE {" AND ".join(conditions)};""", params)
|
||||
cur.execute(query)
|
||||
|
||||
return {"state": "success"}
|
||||
68
api/chalicelib/core/funnels.py
Normal file

@ -0,0 +1,68 @@
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import significance
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
||||
|
||||
def filter_stages(stages: List[schemas._SessionSearchEventSchema]):
|
||||
ALLOW_TYPES = [schemas.EventType.click, schemas.EventType.input,
|
||||
schemas.EventType.location, schemas.EventType.custom,
|
||||
schemas.EventType.click_ios, schemas.EventType.input_ios,
|
||||
schemas.EventType.view_ios, schemas.EventType.custom_ios, ]
|
||||
return [s for s in stages if s.type in ALLOW_TYPES and s.value is not None]
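    # Illustrative behaviour (hypothetical stages, not part of the change): a CLICK stage with a
    # value and a LOCATION stage with a value are kept, while an unsupported type (e.g. an ERROR
    # stage) or any stage whose value is None is dropped from the funnel.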
|
||||
|
||||
|
||||
def __parse_events(f_events: List[dict]):
|
||||
return [schemas._SessionSearchEventSchema.parse_obj(e) for e in f_events]
|
||||
|
||||
|
||||
def __fix_stages(f_events: List[schemas._SessionSearchEventSchema]):
|
||||
if f_events is None:
|
||||
return
|
||||
events = []
|
||||
for e in f_events:
|
||||
if e.operator is None:
|
||||
e.operator = schemas.SearchEventOperator._is
|
||||
|
||||
if not isinstance(e.value, list):
|
||||
e.value = [e.value]
|
||||
is_any = sh.isAny_opreator(e.operator)
|
||||
if not is_any and isinstance(e.value, list) and len(e.value) == 0:
|
||||
continue
|
||||
events.append(e)
|
||||
return events
|
||||
|
||||
|
||||
# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
|
||||
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
|
||||
data.events = filter_stages(__parse_events(data.events))
|
||||
data.events = __fix_stages(data.events)
|
||||
if len(data.events) == 0:
|
||||
return {"stages": [], "totalDropDueToIssues": 0}
|
||||
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data.dict(), project_id=project_id)
|
||||
insights = helper.list_to_camel_case(insights)
|
||||
if len(insights) > 0:
|
||||
# TODO: check if this correct
|
||||
if total_drop_due_to_issues > insights[0]["sessionsCount"]:
|
||||
if len(insights) == 0:
|
||||
total_drop_due_to_issues = 0
|
||||
else:
|
||||
total_drop_due_to_issues = insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]
|
||||
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
|
||||
return {"stages": insights,
|
||||
"totalDropDueToIssues": total_drop_due_to_issues}
|
||||
|
||||
|
||||
# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
|
||||
def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
|
||||
data.events = filter_stages(data.events)
|
||||
data.events = __fix_stages(data.events)
|
||||
if len(data.events) < 0:
|
||||
return {"issues": []}
|
||||
|
||||
return {
|
||||
"issues": helper.dict_to_camel_case(
|
||||
significance.get_issues_list(filter_d=data.dict(), project_id=project_id, first_stage=1,
|
||||
last_stage=len(data.events)))}

@ -1,330 +0,0 @@
import logging
|
||||
|
||||
import redis
|
||||
import requests
|
||||
from decouple import config
|
||||
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def app_connection_string(name, port, path):
|
||||
namespace = config("POD_NAMESPACE", default="app")
|
||||
conn_string = config("CLUSTER_URL", default="svc.cluster.local")
|
||||
return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"
|
||||
|
||||
|
||||
HEALTH_ENDPOINTS = {
|
||||
"alerts": app_connection_string("alerts-openreplay", 8888, "health"),
|
||||
"assets": app_connection_string("assets-openreplay", 8888, "metrics"),
|
||||
"assist": app_connection_string("assist-openreplay", 8888, "health"),
|
||||
"chalice": app_connection_string("chalice-openreplay", 8888, "metrics"),
|
||||
"db": app_connection_string("db-openreplay", 8888, "metrics"),
|
||||
"ender": app_connection_string("ender-openreplay", 8888, "metrics"),
|
||||
"heuristics": app_connection_string("heuristics-openreplay", 8888, "metrics"),
|
||||
"http": app_connection_string("http-openreplay", 8888, "metrics"),
|
||||
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
|
||||
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
|
||||
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
|
||||
"sourcemapreader": app_connection_string(
|
||||
"sourcemapreader-openreplay", 8888, "health"
|
||||
),
|
||||
"storage": app_connection_string("storage-openreplay", 8888, "metrics"),
|
||||
}
|
||||
|
||||
|
||||
def __check_database_pg(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["Postgres health-check failed"]},
|
||||
}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
try:
|
||||
cur.execute("SHOW server_version;")
|
||||
# server_version = cur.fetchone()
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: postgres not responding")
|
||||
logger.exception(e)
|
||||
return fail_response
|
||||
try:
|
||||
cur.execute("SELECT openreplay_version() AS version;")
|
||||
# schema_version = cur.fetchone()
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: openreplay_version not defined")
|
||||
logger.exception(e)
|
||||
return fail_response
|
||||
return {
|
||||
"health": True,
|
||||
"details": {
|
||||
# "version": server_version["server_version"],
|
||||
# "schema": schema_version["version"]
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def __always_healthy(*_):
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
|
||||
def __check_be_service(service_name):
|
||||
def fn(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
}
|
||||
try:
|
||||
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
|
||||
if results.status_code != 200:
|
||||
logger.error(
|
||||
f"!! issue with the {service_name}-health code:{results.status_code}"
|
||||
)
|
||||
logger.error(results.text)
|
||||
# fail_response["details"]["errors"].append(results.text)
|
||||
return fail_response
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error(f"!! Timeout getting {service_name}-health")
|
||||
# fail_response["details"]["errors"].append("timeout")
|
||||
return fail_response
|
||||
except Exception as e:
|
||||
logger.error(f"!! Issue getting {service_name}-health response")
|
||||
logger.exception(e)
|
||||
try:
|
||||
logger.error(results.text)
|
||||
# fail_response["details"]["errors"].append(results.text)
|
||||
except Exception:
|
||||
logger.error("couldn't get response")
|
||||
# fail_response["details"]["errors"].append(str(e))
|
||||
return fail_response
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
return fn
|
||||
|
||||
|
||||
def __check_redis(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
}
|
||||
if config("REDIS_STRING", default=None) is None:
|
||||
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
|
||||
return fail_response
|
||||
|
||||
try:
|
||||
r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
|
||||
r.ping()
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting redis-health response")
|
||||
logger.exception(e)
|
||||
# fail_response["details"]["errors"].append(str(e))
|
||||
return fail_response
|
||||
|
||||
return {
|
||||
"health": True,
|
||||
"details": {
|
||||
# "version": r.execute_command('INFO')['redis_version']
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def __check_SSL(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["SSL Certificate health-check failed"]},
|
||||
}
|
||||
try:
|
||||
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: SSL Certificate")
|
||||
logger.exception(e)
|
||||
return fail_response
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
|
||||
def __get_sessions_stats(*_):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
constraints = ["projects.deleted_at IS NULL"]
|
||||
query = cur.mogrify(
|
||||
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
|
||||
COALESCE(SUM(events_count),0) AS e_c
|
||||
FROM public.projects_stats
|
||||
INNER JOIN public.projects USING(project_id)
|
||||
WHERE {" AND ".join(constraints)};"""
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
|
||||
|
||||
|
||||
def get_health(tenant_id=None):
|
||||
health_map = {
|
||||
"databases": {"postgres": __check_database_pg},
|
||||
"ingestionPipeline": {"redis": __check_redis},
|
||||
"backendServices": {
|
||||
"alerts": __check_be_service("alerts"),
|
||||
"assets": __check_be_service("assets"),
|
||||
"assist": __check_be_service("assist"),
|
||||
"chalice": __always_healthy,
|
||||
"db": __check_be_service("db"),
|
||||
"ender": __check_be_service("ender"),
|
||||
"frontend": __always_healthy,
|
||||
"heuristics": __check_be_service("heuristics"),
|
||||
"http": __check_be_service("http"),
|
||||
"ingress-nginx": __always_healthy,
|
||||
"integrations": __check_be_service("integrations"),
|
||||
"sink": __check_be_service("sink"),
|
||||
"sourcemapreader": __check_be_service("sourcemapreader"),
|
||||
"storage": __check_be_service("storage"),
|
||||
},
|
||||
"details": __get_sessions_stats,
|
||||
"ssl": __check_SSL,
|
||||
}
|
||||
return __process_health(health_map=health_map)
|
||||
|
||||
|
||||
def __process_health(health_map):
|
||||
response = dict(health_map)
|
||||
for parent_key in health_map.keys():
|
||||
if config(f"SKIP_H_{parent_key.upper()}", cast=bool, default=False):
|
||||
response.pop(parent_key)
|
||||
elif isinstance(health_map[parent_key], dict):
|
||||
for element_key in health_map[parent_key]:
|
||||
if config(
|
||||
f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
|
||||
cast=bool,
|
||||
default=False,
|
||||
):
|
||||
response[parent_key].pop(element_key)
|
||||
else:
|
||||
response[parent_key][element_key] = health_map[parent_key][
|
||||
element_key
|
||||
]()
|
||||
else:
|
||||
response[parent_key] = health_map[parent_key]()
|
||||
return response
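    # Sketch of the skip mechanism (hypothetical env vars): SKIP_H_SSL=true drops the whole "ssl"
    # entry, SKIP_H_BACKENDSERVICES_ASSIST=true drops only the "assist" probe from
    # "backendServices"; every remaining callable is invoked and replaced by its
    # {"health": ..., "details": ...} result.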
|
||||
|
||||
|
||||
def cron():
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""SELECT projects.project_id,
|
||||
projects.created_at,
|
||||
projects.sessions_last_check_at,
|
||||
projects.first_recorded_session_at,
|
||||
projects_stats.last_update_at
|
||||
FROM public.projects
|
||||
LEFT JOIN public.projects_stats USING (project_id)
|
||||
WHERE projects.deleted_at IS NULL
|
||||
ORDER BY project_id;"""
|
||||
)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
for r in rows:
|
||||
insert = False
|
||||
if r["last_update_at"] is None:
|
||||
# never counted before, must insert
|
||||
insert = True
|
||||
if r["first_recorded_session_at"] is None:
|
||||
if r["sessions_last_check_at"] is None:
|
||||
count_start_from = r["created_at"]
|
||||
else:
|
||||
count_start_from = r["sessions_last_check_at"]
|
||||
else:
|
||||
count_start_from = r["first_recorded_session_at"]
|
||||
|
||||
else:
|
||||
# counted before, must update
|
||||
count_start_from = r["last_update_at"]
|
||||
|
||||
count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
|
||||
params = {
|
||||
"project_id": r["project_id"],
|
||||
"start_ts": count_start_from,
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0,
|
||||
}
|
||||
|
||||
query = cur.mogrify(
|
||||
"""SELECT COUNT(1) AS sessions_count,
|
||||
COALESCE(SUM(events_count),0) AS events_count
|
||||
FROM public.sessions
|
||||
WHERE project_id=%(project_id)s
|
||||
AND start_ts>=%(start_ts)s
|
||||
AND start_ts<=%(end_ts)s
|
||||
AND duration IS NOT NULL;""",
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
params["sessions_count"] = row["sessions_count"]
|
||||
params["events_count"] = row["events_count"]
|
||||
|
||||
if insert:
|
||||
query = cur.mogrify(
|
||||
"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
|
||||
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
|
||||
params,
|
||||
)
|
||||
else:
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.projects_stats
|
||||
SET sessions_count=sessions_count+%(sessions_count)s,
|
||||
events_count=events_count+%(events_count)s,
|
||||
last_update_at=(now() AT TIME ZONE 'utc'::text)
|
||||
WHERE project_id=%(project_id)s;""",
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
|
||||
|
||||
# this cron is used to correct the sessions&events count every week
|
||||
def weekly_cron():
|
||||
with pg_client.PostgresClient(long_query=True) as cur:
|
||||
query = cur.mogrify(
|
||||
"""SELECT project_id,
|
||||
projects_stats.last_update_at
|
||||
FROM public.projects
|
||||
LEFT JOIN public.projects_stats USING (project_id)
|
||||
WHERE projects.deleted_at IS NULL
|
||||
ORDER BY project_id;"""
|
||||
)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
for r in rows:
|
||||
if r["last_update_at"] is None:
|
||||
continue
|
||||
|
||||
params = {
|
||||
"project_id": r["project_id"],
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0,
|
||||
}
|
||||
|
||||
query = cur.mogrify(
|
||||
"""SELECT COUNT(1) AS sessions_count,
|
||||
COALESCE(SUM(events_count),0) AS events_count
|
||||
FROM public.sessions
|
||||
WHERE project_id=%(project_id)s
|
||||
AND start_ts<=%(end_ts)s
|
||||
AND duration IS NOT NULL;""",
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
params["sessions_count"] = row["sessions_count"]
|
||||
params["events_count"] = row["events_count"]
|
||||
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.projects_stats
|
||||
SET sessions_count=%(sessions_count)s,
|
||||
events_count=%(events_count)s,
|
||||
last_update_at=(now() AT TIME ZONE 'utc'::text)
|
||||
WHERE project_id=%(project_id)s;""",
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
82
api/chalicelib/core/heatmaps.py
Normal file

@ -0,0 +1,82 @@
from chalicelib.utils import sql_helper as sh
|
||||
import schemas
|
||||
from chalicelib.utils import helper, pg_client
|
||||
|
||||
|
||||
def get_by_url(project_id, data: schemas.GetHeatmapPayloadSchema):
|
||||
args = {"startDate": data.startDate, "endDate": data.endDate,
|
||||
"project_id": project_id, "url": data.url}
|
||||
constraints = ["sessions.project_id = %(project_id)s",
|
||||
"(url = %(url)s OR path= %(url)s)",
|
||||
"clicks.timestamp >= %(startDate)s",
|
||||
"clicks.timestamp <= %(endDate)s",
|
||||
"start_ts >= %(startDate)s",
|
||||
"start_ts <= %(endDate)s",
|
||||
"duration IS NOT NULL"]
|
||||
query_from = "events.clicks INNER JOIN sessions USING (session_id)"
|
||||
q_count = "count(1) AS count"
|
||||
has_click_rage_filter = False
|
||||
if len(data.filters) > 0:
|
||||
for i, f in enumerate(data.filters):
|
||||
if f.type == schemas.FilterType.issue and len(f.value) > 0:
|
||||
has_click_rage_filter = True
|
||||
q_count = "max(real_count) AS count,TRUE AS click_rage"
|
||||
query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
|
||||
INNER JOIN issues AS mis USING (issue_id)
|
||||
INNER JOIN LATERAL (
|
||||
SELECT COUNT(1) AS real_count
|
||||
FROM events.clicks AS sc
|
||||
INNER JOIN sessions as ss USING (session_id)
|
||||
WHERE ss.project_id = %(project_id)s
|
||||
AND (sc.url = %(url)s OR sc.path = %(url)s)
|
||||
AND sc.timestamp >= %(startDate)s
|
||||
AND sc.timestamp <= %(endDate)s
|
||||
AND ss.start_ts >= %(startDate)s
|
||||
AND ss.start_ts <= %(endDate)s
|
||||
AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
|
||||
constraints += ["mis.project_id = %(project_id)s",
|
||||
"issues.timestamp >= %(startDate)s",
|
||||
"issues.timestamp <= %(endDate)s"]
|
||||
f_k = f"issue_value{i}"
|
||||
args = {**args, **sh.multi_values(f.value, value_key=f_k)}
|
||||
constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
|
||||
f.value, value_key=f_k))
|
||||
constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
|
||||
f.value, value_key=f_k))
|
||||
if len(f.filters) > 0:
|
||||
for j, sf in enumerate(f.filters):
|
||||
f_k = f"issue_svalue{i}{j}"
|
||||
args = {**args, **sh.multi_values(sf.value, value_key=f_k)}
|
||||
if sf.type == schemas.IssueFilterType._on_selector and len(sf.value) > 0:
|
||||
constraints.append(sh.multi_conditions(f"clicks.selector = %({f_k})s",
|
||||
sf.value, value_key=f_k))
|
||||
|
||||
if data.click_rage and not has_click_rage_filter:
|
||||
constraints.append("""(issues.session_id IS NULL
|
||||
OR (issues.timestamp >= %(startDate)s
|
||||
AND issues.timestamp <= %(endDate)s
|
||||
AND mis.project_id = %(project_id)s
|
||||
AND mis.type = 'click_rage'))""")
|
||||
q_count += ",COALESCE(bool_or(mis.issue_id IS NOT NULL), FALSE) AS click_rage"
|
||||
query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
|
||||
LEFT JOIN issues AS mis USING (issue_id)"""
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""SELECT selector, {q_count}
|
||||
FROM {query_from}
|
||||
WHERE {" AND ".join(constraints)}
|
||||
GROUP BY selector
|
||||
LIMIT 500;""", args)
|
||||
# print("---------")
|
||||
# print(query.decode('UTF-8'))
|
||||
# print("---------")
|
||||
try:
|
||||
cur.execute(query)
|
||||
except Exception as err:
|
||||
print("--------- HEATMAP SEARCH QUERY EXCEPTION -----------")
|
||||
print(query.decode('UTF-8'))
|
||||
print("--------- PAYLOAD -----------")
|
||||
print(data)
|
||||
print("--------------------")
|
||||
raise err
|
||||
rows = cur.fetchall()
|
||||
return helper.list_to_camel_case(rows)
|
||||
926
api/chalicelib/core/insights.py
Normal file

@ -0,0 +1,926 @@
import schemas
|
||||
from chalicelib.core.metrics import __get_constraints, __get_constraint_values
|
||||
from chalicelib.utils import helper, dev
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def __transform_journey(rows):
|
||||
nodes = []
|
||||
links = []
|
||||
for r in rows:
|
||||
source = r["source_event"][r["source_event"].index("_") + 1:]
|
||||
target = r["target_event"][r["target_event"].index("_") + 1:]
|
||||
if source not in nodes:
|
||||
nodes.append(source)
|
||||
if target not in nodes:
|
||||
nodes.append(target)
|
||||
links.append({"source": nodes.index(source), "target": nodes.index(target), "value": r["value"]})
|
||||
return {"nodes": nodes, "links": sorted(links, key=lambda x: x["value"], reverse=True)}
|
||||
|
||||
|
||||
JOURNEY_DEPTH = 5
|
||||
JOURNEY_TYPES = {
|
||||
"PAGES": {"table": "events.pages", "column": "path", "table_id": "message_id"},
|
||||
"CLICK": {"table": "events.clicks", "column": "label", "table_id": "message_id"},
|
||||
# "VIEW": {"table": "events_ios.views", "column": "name", "table_id": "seq_index"}, TODO: enable this for SAAS only
|
||||
"EVENT": {"table": "events_common.customs", "column": "name", "table_id": "seq_index"}
|
||||
}
|
||||
|
||||
|
||||
def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(), filters=[], **args):
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
|
||||
time_constraint=True)
|
||||
event_start = None
|
||||
event_table = JOURNEY_TYPES["PAGES"]["table"]
|
||||
event_column = JOURNEY_TYPES["PAGES"]["column"]
|
||||
event_table_id = JOURNEY_TYPES["PAGES"]["table_id"]
|
||||
extra_values = {}
|
||||
for f in filters:
|
||||
if f["type"] == "START_POINT":
|
||||
event_start = f["value"]
|
||||
elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
|
||||
event_table = JOURNEY_TYPES[f["value"]]["table"]
|
||||
event_column = JOURNEY_TYPES[f["value"]]["column"]
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
pg_sub_query_subset.append(f"sessions.user_id = %(user_id)s")
|
||||
extra_values["user_id"] = f["value"]
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT source_event,
|
||||
target_event,
|
||||
count(*) AS value
|
||||
|
||||
FROM (SELECT event_number || '_' || value as target_event,
|
||||
LAG(event_number || '_' || value, 1) OVER ( PARTITION BY session_rank ) AS source_event
|
||||
FROM (SELECT value,
|
||||
session_rank,
|
||||
message_id,
|
||||
ROW_NUMBER() OVER ( PARTITION BY session_rank ORDER BY timestamp ) AS event_number
|
||||
|
||||
{f"FROM (SELECT * FROM (SELECT *, MIN(mark) OVER ( PARTITION BY session_id , session_rank ORDER BY timestamp ) AS max FROM (SELECT *, CASE WHEN value = %(event_start)s THEN timestamp ELSE NULL END as mark"
|
||||
if event_start else ""}
|
||||
|
||||
FROM (SELECT session_id,
|
||||
message_id,
|
||||
timestamp,
|
||||
value,
|
||||
SUM(new_session) OVER (ORDER BY session_id, timestamp) AS session_rank
|
||||
FROM (SELECT *,
|
||||
CASE
|
||||
WHEN source_timestamp IS NULL THEN 1
|
||||
ELSE 0 END AS new_session
|
||||
FROM (SELECT session_id,
|
||||
{event_table_id} AS message_id,
|
||||
timestamp,
|
||||
{event_column} AS value,
|
||||
LAG(timestamp)
|
||||
OVER (PARTITION BY session_id ORDER BY timestamp) AS source_timestamp
|
||||
FROM {event_table} INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
) AS related_events) AS ranked_events) AS processed
|
||||
{") AS marked) AS maxed WHERE timestamp >= max) AS filtered" if event_start else ""}
|
||||
) AS sorted_events
|
||||
WHERE event_number <= %(JOURNEY_DEPTH)s) AS final
|
||||
WHERE source_event IS NOT NULL
|
||||
and target_event IS NOT NULL
|
||||
GROUP BY source_event, target_event
|
||||
ORDER BY value DESC
|
||||
LIMIT 20;"""
|
||||
params = {"project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, "event_start": event_start, "JOURNEY_DEPTH": JOURNEY_DEPTH,
|
||||
**__get_constraint_values(args), **extra_values}
|
||||
# print(cur.mogrify(pg_query, params))
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
|
||||
return __transform_journey(rows)
|
||||
|
||||
|
||||
def __compute_weekly_percentage(rows):
|
||||
if rows is None or len(rows) == 0:
|
||||
return rows
|
||||
t = -1
|
||||
for r in rows:
|
||||
if r["week"] == 0:
|
||||
t = r["usersCount"]
|
||||
r["percentage"] = r["usersCount"] / t
|
||||
return rows
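    # Illustrative example (hypothetical counts): with usersCount=200 in week 0 and 50 in week 1,
    # week 0 gets percentage=1.0 and week 1 gets percentage=0.25, i.e. every week is divided by
    # the week-0 cohort size.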
|
||||
|
||||
|
||||
def __complete_retention(rows, start_date, end_date=None):
|
||||
if rows is None:
|
||||
return []
|
||||
max_week = 10
|
||||
for i in range(max_week):
|
||||
if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date:
|
||||
break
|
||||
neutral = {
|
||||
"firstConnexionWeek": start_date,
|
||||
"week": i,
|
||||
"usersCount": 0,
|
||||
"connectedUsers": [],
|
||||
"percentage": 0
|
||||
}
|
||||
if i < len(rows) \
|
||||
and i != rows[i]["week"]:
|
||||
rows.insert(i, neutral)
|
||||
elif i >= len(rows):
|
||||
rows.append(neutral)
|
||||
return rows
|
||||
|
||||
|
||||
def __complete_acquisition(rows, start_date, end_date=None):
|
||||
if rows is None:
|
||||
return []
|
||||
max_week = 10
|
||||
week = 0
|
||||
delta_date = 0
|
||||
while max_week > 0:
|
||||
start_date += TimeUTC.MS_WEEK
|
||||
if end_date is not None and start_date >= end_date:
|
||||
break
|
||||
delta = 0
|
||||
if delta_date + week >= len(rows) \
|
||||
or delta_date + week < len(rows) and rows[delta_date + week]["firstConnexionWeek"] > start_date:
|
||||
for i in range(max_week):
|
||||
if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date:
|
||||
break
|
||||
|
||||
neutral = {
|
||||
"firstConnexionWeek": start_date,
|
||||
"week": i,
|
||||
"usersCount": 0,
|
||||
"connectedUsers": [],
|
||||
"percentage": 0
|
||||
}
|
||||
rows.insert(delta_date + week + i, neutral)
|
||||
delta = i
|
||||
else:
|
||||
for i in range(max_week):
|
||||
if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date:
|
||||
break
|
||||
|
||||
neutral = {
|
||||
"firstConnexionWeek": start_date,
|
||||
"week": i,
|
||||
"usersCount": 0,
|
||||
"connectedUsers": [],
|
||||
"percentage": 0
|
||||
}
|
||||
if delta_date + week + i < len(rows) \
|
||||
and i != rows[delta_date + week + i]["week"]:
|
||||
rows.insert(delta_date + week + i, neutral)
|
||||
elif delta_date + week + i >= len(rows):
|
||||
rows.append(neutral)
|
||||
delta = i
|
||||
week += delta
|
||||
max_week -= 1
|
||||
delta_date += 1
|
||||
return rows
|
||||
|
||||
|
||||
def users_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[],
                    **args):
    startTimestamp = TimeUTC.trunc_week(startTimestamp)
    endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    pg_sub_query.append("DATE_TRUNC('week', to_timestamp(start_ts / 1000)) = to_timestamp(%(startTimestamp)s / 1000)")
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT FLOOR(DATE_PART('day', connexion_week - DATE_TRUNC('week', to_timestamp(%(startTimestamp)s / 1000)::timestamp)) / 7)::integer AS week,
                              COUNT(DISTINCT connexions_list.user_id) AS users_count,
                              ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users
                       FROM (SELECT DISTINCT user_id
                             FROM sessions
                             WHERE {" AND ".join(pg_sub_query)}
                               AND DATE_PART('week', to_timestamp((sessions.start_ts - %(startTimestamp)s)/1000)) = 1
                               AND NOT EXISTS((SELECT 1
                                               FROM sessions AS bsess
                                               WHERE bsess.start_ts < %(startTimestamp)s
                                                 AND project_id = %(project_id)s
                                                 AND bsess.user_id = sessions.user_id
                                               LIMIT 1))
                            ) AS users_list
                            LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week,
                                                      user_id
                                               FROM sessions
                                               WHERE users_list.user_id = sessions.user_id
                                                 AND %(startTimestamp)s <= sessions.start_ts
                                                 AND sessions.project_id = %(project_id)s
                                                 AND sessions.start_ts < (%(endTimestamp)s - 1)
                                               GROUP BY connexion_week, user_id
                                              ) AS connexions_list ON (TRUE)
                       GROUP BY week
                       ORDER BY week;"""

        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        print(cur.mogrify(pg_query, params))
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        rows = __compute_weekly_percentage(helper.list_to_camel_case(rows))
    return {
        "startTimestamp": startTimestamp,
        "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now())
    }
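# Note (editorial): the users_retention query above builds a single cohort: users whose first
# recorded session falls in the week of startTimestamp (the NOT EXISTS excludes anyone seen
# earlier), then records in which of the following weeks each of them came back. Week 0 is the
# signup week itself; the per-week percentage is presumably filled in by __compute_weekly_percentage.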
def users_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                      filters=[],
                      **args):
    startTimestamp = TimeUTC.trunc_week(startTimestamp)
    endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT EXTRACT(EPOCH FROM first_connexion_week::date)::bigint*1000 AS first_connexion_week,
                              FLOOR(DATE_PART('day', connexion_week - first_connexion_week) / 7)::integer AS week,
                              COUNT(DISTINCT connexions_list.user_id) AS users_count,
                              ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users
                       FROM (SELECT user_id, MIN(DATE_TRUNC('week', to_timestamp(start_ts / 1000))) AS first_connexion_week
                             FROM sessions
                             WHERE {" AND ".join(pg_sub_query)}
                               AND NOT EXISTS((SELECT 1
                                               FROM sessions AS bsess
                                               WHERE bsess.start_ts < %(startTimestamp)s
                                                 AND project_id = %(project_id)s
                                                 AND bsess.user_id = sessions.user_id
                                               LIMIT 1))
                             GROUP BY user_id) AS users_list
                            LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week,
                                                      user_id
                                               FROM sessions
                                               WHERE users_list.user_id = sessions.user_id
                                                 AND first_connexion_week <=
                                                     DATE_TRUNC('week', to_timestamp(sessions.start_ts / 1000)::timestamp)
                                                 AND sessions.project_id = %(project_id)s
                                                 AND sessions.start_ts < (%(endTimestamp)s - 1)
                                               GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE)
                       GROUP BY first_connexion_week, week
                       ORDER BY first_connexion_week, week;"""

        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        print(cur.mogrify(pg_query, params))
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        rows = __compute_weekly_percentage(helper.list_to_camel_case(rows))
    return {
        "startTimestamp": startTimestamp,
        "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now())
    }
def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                      filters=[],
                      **args):
    startTimestamp = TimeUTC.trunc_week(startTimestamp)
    endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
    pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
    event_type = "PAGES"
    event_value = "/"
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]
    pg_sub_query.append(f"feature.{event_column} = %(value)s")

    with pg_client.PostgresClient() as cur:
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query[:-1])}
                             AND length({event_column}) > 2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_query = f"""SELECT FLOOR(DATE_PART('day', connexion_week - to_timestamp(%(startTimestamp)s/1000)) / 7)::integer AS week,
                              COUNT(DISTINCT connexions_list.user_id) AS users_count,
                              ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users
                       FROM (SELECT DISTINCT user_id
                             FROM sessions INNER JOIN {event_table} AS feature USING (session_id)
                             WHERE {" AND ".join(pg_sub_query)}
                               AND DATE_PART('week', to_timestamp((sessions.start_ts - %(startTimestamp)s)/1000)) = 1
                               AND NOT EXISTS((SELECT 1
                                               FROM sessions AS bsess INNER JOIN {event_table} AS bfeature USING (session_id)
                                               WHERE bsess.start_ts < %(startTimestamp)s
                                                 AND project_id = %(project_id)s
                                                 AND bsess.user_id = sessions.user_id
                                                 AND bfeature.timestamp < %(startTimestamp)s
                                                 AND bfeature.{event_column} = %(value)s
                                               LIMIT 1))
                             GROUP BY user_id) AS users_list
                            LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week,
                                                      user_id
                                               FROM sessions INNER JOIN {event_table} AS feature USING (session_id)
                                               WHERE users_list.user_id = sessions.user_id
                                                 AND %(startTimestamp)s <= sessions.start_ts
                                                 AND sessions.project_id = %(project_id)s
                                                 AND sessions.start_ts < (%(endTimestamp)s - 1)
                                                 AND feature.timestamp >= %(startTimestamp)s
                                                 AND feature.timestamp < %(endTimestamp)s
                                                 AND feature.{event_column} = %(value)s
                                               GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE)
                       GROUP BY week
                       ORDER BY week;"""

        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        print(cur.mogrify(pg_query, params))
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        rows = __compute_weekly_percentage(helper.list_to_camel_case(rows))
    return {
        "startTimestamp": startTimestamp,
        "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}],
        "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now())
    }
def feature_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                        filters=[],
                        **args):
    startTimestamp = TimeUTC.trunc_week(startTimestamp)
    endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
    pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
    event_type = "PAGES"
    event_value = "/"
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]

    pg_sub_query.append(f"feature.{event_column} = %(value)s")

    with pg_client.PostgresClient() as cur:
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query[:-1])}
                             AND length({event_column}) > 2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_query = f"""SELECT EXTRACT(EPOCH FROM first_connexion_week::date)::bigint*1000 AS first_connexion_week,
                              FLOOR(DATE_PART('day', connexion_week - first_connexion_week) / 7)::integer AS week,
                              COUNT(DISTINCT connexions_list.user_id) AS users_count,
                              ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users
                       FROM (SELECT user_id, DATE_TRUNC('week', to_timestamp(first_connexion_week / 1000)) AS first_connexion_week
                             FROM (SELECT DISTINCT user_id, MIN(start_ts) AS first_connexion_week
                                   FROM sessions INNER JOIN {event_table} AS feature USING (session_id)
                                   WHERE {" AND ".join(pg_sub_query)}
                                     AND NOT EXISTS((SELECT 1
                                                     FROM sessions AS bsess INNER JOIN {event_table} AS bfeature USING (session_id)
                                                     WHERE bsess.start_ts < %(startTimestamp)s
                                                       AND project_id = %(project_id)s
                                                       AND bsess.user_id = sessions.user_id
                                                       AND bfeature.timestamp < %(startTimestamp)s
                                                       AND bfeature.{event_column} = %(value)s
                                                     LIMIT 1))
                                   GROUP BY user_id) AS raw_users_list) AS users_list
                            LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week,
                                                      user_id
                                               FROM sessions INNER JOIN {event_table} AS feature USING (session_id)
                                               WHERE users_list.user_id = sessions.user_id
                                                 AND first_connexion_week <=
                                                     DATE_TRUNC('week', to_timestamp(sessions.start_ts / 1000)::timestamp)
                                                 AND sessions.project_id = %(project_id)s
                                                 AND sessions.start_ts < (%(endTimestamp)s - 1)
                                                 AND feature.timestamp >= %(startTimestamp)s
                                                 AND feature.timestamp < %(endTimestamp)s
                                                 AND feature.{event_column} = %(value)s
                                               GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE)
                       GROUP BY first_connexion_week, week
                       ORDER BY first_connexion_week, week;"""

        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        print(cur.mogrify(pg_query, params))
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        rows = __compute_weekly_percentage(helper.list_to_camel_case(rows))
    return {
        "startTimestamp": startTimestamp,
        "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}],
        "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now())
    }
def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                                 filters=[],
                                 **args):
    startTimestamp = TimeUTC.trunc_week(startTimestamp)
    endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    event_table = JOURNEY_TYPES["CLICK"]["table"]
    event_column = JOURNEY_TYPES["CLICK"]["column"]
    extra_values = {}
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_table = JOURNEY_TYPES[f["value"]]["table"]
            event_column = JOURNEY_TYPES[f["value"]]["column"]
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]

    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count
                       FROM sessions
                       WHERE {" AND ".join(pg_sub_query)}
                         AND user_id IS NOT NULL;"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        all_user_count = cur.fetchone()["count"]
        if all_user_count == 0:
            return []
        pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
        pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
        pg_sub_query.append(f"length({event_column})>2")
        pg_query = f"""SELECT {event_column} AS value, COUNT(DISTINCT user_id) AS count
                       FROM {event_table} AS feature INNER JOIN sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                         AND user_id IS NOT NULL
                       GROUP BY value
                       ORDER BY count DESC
                       LIMIT 7;"""
        # TODO: solve full scan
        print(cur.mogrify(pg_query, params))
        print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        popularity = cur.fetchall()
        pg_query = f"""SELECT {event_column} AS value, COUNT(session_id) AS count
                       FROM {event_table} AS feature INNER JOIN sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                       GROUP BY value;"""
        # TODO: solve full scan
        print(cur.mogrify(pg_query, params))
        print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        frequencies = cur.fetchall()
        total_usage = sum([f["count"] for f in frequencies])
        frequencies = {f["value"]: f["count"] for f in frequencies}
        for p in popularity:
            p["popularity"] = p.pop("count") / all_user_count
            p["frequency"] = frequencies[p["value"]] / total_usage

    return popularity
def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                     filters=[],
                     **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    event_type = "CLICK"
    event_value = '/'
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count
                       FROM sessions
                       WHERE {" AND ".join(pg_sub_query)}
                         AND user_id IS NOT NULL;"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        all_user_count = cur.fetchone()["count"]
        if all_user_count == 0:
            return {"adoption": 0, "target": 0, "filters": [{"type": "EVENT_TYPE", "value": event_type},
                                                            {"type": "EVENT_VALUE", "value": event_value}], }
        pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
        pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query[:-1])}
                             AND length({event_column}) > 2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_sub_query.append(f"feature.{event_column} = %(value)s")
        pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count
                       FROM {event_table} AS feature INNER JOIN sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                         AND user_id IS NOT NULL;"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        adoption = cur.fetchone()["count"] / all_user_count
    return {"target": all_user_count, "adoption": adoption,
            "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]}
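# Note (editorial, illustrative only): feature_adoption reports adoption as the share of
# identified users who triggered the selected event at least once in the period:
#     adoption = distinct users matching the feature filter / distinct users with any session
# e.g. 150 matching users out of a 600-user target would give {"adoption": 0.25, "target": 600}.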
def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                               filters=[], **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    event_type = "CLICK"
    event_value = '/'
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]
    with pg_client.PostgresClient() as cur:
        pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
        pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query[:-1])}
                             AND length({event_column}) > 2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_sub_query.append(f"feature.{event_column} = %(value)s")
        pg_query = f"""SELECT user_id, COUNT(DISTINCT session_id) AS count
                       FROM {event_table} AS feature
                                INNER JOIN sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                       GROUP BY 1
                       ORDER BY 2 DESC
                       LIMIT 10;"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
    return {"users": helper.list_to_camel_case(rows),
            "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]}
def feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                                 filters=[], **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
                                           chart=True, data=args)
    event_type = "CLICK"
    event_value = '/'
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]
    with pg_client.PostgresClient() as cur:
        pg_sub_query_chart.append("feature.timestamp >= %(startTimestamp)s")
        pg_sub_query_chart.append("feature.timestamp < %(endTimestamp)s")
        pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
        pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query)}
                             AND length({event_column})>2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_sub_query_chart.append(f"feature.{event_column} = %(value)s")
        pg_query = f"""SELECT generated_timestamp AS timestamp,
                              COALESCE(COUNT(session_id), 0) AS count
                       FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                            LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                               FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                                               WHERE {" AND ".join(pg_sub_query_chart)}
                                              ) AS users ON (TRUE)
                       GROUP BY generated_timestamp
                       ORDER BY generated_timestamp;"""
        params = {"step_size": TimeUTC.MS_DAY, "project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        print(cur.mogrify(pg_query, params))
        print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
    return {"chart": helper.list_to_camel_case(rows),
            "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]}
def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                      filters=[],
                      **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
    pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
    event_table = JOURNEY_TYPES["CLICK"]["table"]
    event_column = JOURNEY_TYPES["CLICK"]["column"]
    extra_values = {}
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_table = JOURNEY_TYPES[f["value"]]["table"]
            event_column = JOURNEY_TYPES[f["value"]]["column"]
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    pg_sub_query.append(f"length({event_column})>2")
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT {event_column} AS value, AVG(DISTINCT session_id) AS avg
                       FROM {event_table} AS feature INNER JOIN sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                       GROUP BY value
                       ORDER BY avg DESC
                       LIMIT 7;"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # TODO: solve full scan issue
        print(cur.mogrify(pg_query, params))
        print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()

    return rows
def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                 filters=[],
                 **args):
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
                                           chart=True, data=args)

    pg_sub_query_chart.append("user_id IS NOT NULL")
    period = "DAY"
    extra_values = {}
    for f in filters:
        if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]:
            period = f["value"]
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]

    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT AVG(count) AS avg, JSONB_AGG(chart) AS chart
                       FROM (SELECT generated_timestamp AS timestamp,
                                    COALESCE(COUNT(users), 0) AS count
                             FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                                  LEFT JOIN LATERAL (SELECT DISTINCT user_id
                                                     FROM public.sessions
                                                     WHERE {" AND ".join(pg_sub_query_chart)}
                                                    ) AS users ON (TRUE)
                             GROUP BY generated_timestamp
                             ORDER BY generated_timestamp) AS chart;"""
        params = {"step_size": TimeUTC.MS_DAY if period == "DAY" else TimeUTC.MS_WEEK,
                  "project_id": project_id,
                  "startTimestamp": TimeUTC.trunc_day(startTimestamp) if period == "DAY" else TimeUTC.trunc_week(
                      startTimestamp),
                  "endTimestamp": endTimestamp, **__get_constraint_values(args),
                  **extra_values}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        row_users = cur.fetchone()

    return row_users
def users_power(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                filters=[], **args):
    pg_sub_query = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
    pg_sub_query.append("user_id IS NOT NULL")

    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT AVG(count) AS avg, JSONB_AGG(day_users_partition) AS partition
                       FROM (SELECT number_of_days, COUNT(user_id) AS count
                             FROM (SELECT user_id, COUNT(DISTINCT DATE_TRUNC('day', to_timestamp(start_ts / 1000))) AS number_of_days
                                   FROM sessions
                                   WHERE {" AND ".join(pg_sub_query)}
                                   GROUP BY 1) AS users_connexions
                             GROUP BY number_of_days
                             ORDER BY number_of_days) AS day_users_partition;"""
        params = {"project_id": project_id,
                  "startTimestamp": startTimestamp, "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        # print(cur.mogrify(pg_query, params))
        # print("---------------------")
        cur.execute(cur.mogrify(pg_query, params))
        row_users = cur.fetchone()

    return helper.dict_to_camel_case(row_users)
def users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(),
                   filters=[], **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions",
                                     time_constraint=True)
    pg_sub_query.append("user_id IS NOT NULL")
    pg_sub_query.append("feature.timestamp >= %(startTimestamp)s")
    pg_sub_query.append("feature.timestamp < %(endTimestamp)s")
    event_type = "PAGES"
    event_value = "/"
    extra_values = {}
    default = True
    for f in filters:
        if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
            event_type = f["value"]
        elif f["type"] == "EVENT_VALUE":
            event_value = f["value"]
            default = False
        elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
            pg_sub_query.append(f"sessions.user_id = %(user_id)s")
            extra_values["user_id"] = f["value"]
    event_table = JOURNEY_TYPES[event_type]["table"]
    event_column = JOURNEY_TYPES[event_type]["column"]
    pg_sub_query.append(f"feature.{event_column} = %(value)s")

    with pg_client.PostgresClient() as cur:
        if default:
            # get most used value
            pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count
                           FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query[:-1])}
                             AND length({event_column}) > 2
                           GROUP BY value
                           ORDER BY count DESC
                           LIMIT 1;"""
            params = {"project_id": project_id, "startTimestamp": startTimestamp,
                      "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
            cur.execute(cur.mogrify(pg_query, params))
            row = cur.fetchone()
            if row is not None:
                event_value = row["value"]
        extra_values["value"] = event_value
        if len(event_value) > 2:
            pg_sub_query.append(f"length({event_column})>2")
        pg_query = f"""SELECT user_id, last_time, interactions_count, MIN(start_ts) AS first_seen, MAX(start_ts) AS last_seen
                       FROM (SELECT user_id, MAX(timestamp) AS last_time, COUNT(DISTINCT session_id) AS interactions_count
                             FROM {event_table} AS feature INNER JOIN sessions USING (session_id)
                             WHERE {" AND ".join(pg_sub_query)}
                             GROUP BY user_id) AS user_last_usage
                            INNER JOIN sessions USING (user_id)
                       WHERE EXTRACT(EPOCH FROM now()) * 1000 - last_time > 7 * 24 * 60 * 60 * 1000
                       GROUP BY user_id, last_time, interactions_count;"""

        params = {"project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values}
        # print(cur.mogrify(pg_query, params))
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
    return {
        "startTimestamp": startTimestamp,
        "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}],
        "list": helper.list_to_camel_case(rows)
    }
def search(text, feature_type, project_id, platform=None):
    if not feature_type:
        resource_type = "ALL"
        data = search(text=text, feature_type=resource_type, project_id=project_id, platform=platform)
        return data

    pg_sub_query = __get_constraints(project_id=project_id, time_constraint=True, duration=True,
                                     data={} if platform is None else {"platform": platform})

    params = {"startTimestamp": TimeUTC.now() - 2 * TimeUTC.MS_MONTH,
              "endTimestamp": TimeUTC.now(),
              "project_id": project_id,
              "value": helper.string_to_sql_like(text.lower()),
              "platform_0": platform}
    if feature_type == "ALL":
        with pg_client.PostgresClient() as cur:
            sub_queries = []
            for e in JOURNEY_TYPES:
                sub_queries.append(f"""(SELECT DISTINCT {JOURNEY_TYPES[e]["column"]} AS value, '{e}' AS "type"
                                        FROM {JOURNEY_TYPES[e]["table"]} INNER JOIN public.sessions USING (session_id)
                                        WHERE {" AND ".join(pg_sub_query)} AND {JOURNEY_TYPES[e]["column"]} ILIKE %(value)s
                                        LIMIT 10)""")
            pg_query = "UNION ALL".join(sub_queries)
            # print(cur.mogrify(pg_query, params))
            cur.execute(cur.mogrify(pg_query, params))
            rows = cur.fetchall()
    elif JOURNEY_TYPES.get(feature_type) is not None:
        with pg_client.PostgresClient() as cur:
            pg_query = f"""SELECT DISTINCT {JOURNEY_TYPES[feature_type]["column"]} AS value, '{feature_type}' AS "type"
                           FROM {JOURNEY_TYPES[feature_type]["table"]} INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query)} AND {JOURNEY_TYPES[feature_type]["column"]} ILIKE %(value)s
                           LIMIT 10;"""
            # print(cur.mogrify(pg_query, params))
            cur.execute(cur.mogrify(pg_query, params))
            rows = cur.fetchall()
    else:
        return []
    return [helper.dict_to_camel_case(row) for row in rows]
87 api/chalicelib/core/integration_github.py Normal file
@@ -0,0 +1,87 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.github


class GitHubIntegration(integration_base.BaseIntegration):

    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        super(GitHubIntegration, self).__init__(user_id=user_id, ISSUE_CLASS=GithubIntegrationIssue)

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        return self._issue_handler

    def get_obfuscated(self):
        integration = self.get()
        if integration is None:
            return None
        return {"token": helper.obfuscate(text=integration["token"]), "provider": self.provider.lower()}

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.oauth_authentication
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    def _add(self, data):
        pass

    def add(self, token, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.oauth_authentication(user_id, provider, provider_user_id, token)
                            VALUES (%(user_id)s, 'github', '', %(token)s)
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             "token": token})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    # TODO: make a revoke token call
    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.oauth_authentication
                            WHERE user_id=%(user_id)s AND provider=%(provider)s;""",
                            {"user_id": self._user_id, "provider": self.provider.lower()})
            )
            return {"state": "success"}

    def add_edit(self, data):
        s = self.get()
        if s is not None:
            return self.update(
                changes={
                    "token": data["token"] \
                        if data.get("token") and len(data["token"]) > 0 and data["token"].find("***") == -1 \
                        else s["token"]
                },
                obfuscate=True
            )
        else:
            return self.add(token=data["token"], obfuscate=True)
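# Usage sketch (editorial, not part of this change; the tenant/user values are assumptions):
#     integration = GitHubIntegration(tenant_id=1, user_id=42)
#     integration.add_edit(data={"token": "<personal-access-token>"})  # insert or update the stored token
#     integration.get_obfuscated()   # -> {"token": "***...", "provider": "github"}
#     integration.issue_handler      # GithubIntegrationIssue bound to the stored token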
100 api/chalicelib/core/integration_github_issue.py Normal file
@@ -0,0 +1,100 @@
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
from chalicelib.utils import github_client_v3
from chalicelib.utils.github_client_v3 import github_formatters as formatter


class GithubIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, integration_token):
        self.__client = github_client_v3.githubV3Request(integration_token)
        super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)

    def get_current_user(self):
        return formatter.user(self.__client.get("/user"))

    def get_meta(self, repoId):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{repoId}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)
        meta = {
            'users': users,
            'issueTypes': [formatter.label(l) for l in
                           self.__client.get(f"/repositories/{repoId}/labels")]
        }

        return meta

    def create_new_assignment(self, integration_project_id, title, description, assignee,
                              issue_type):
        repoId = integration_project_id
        assignees = [assignee]
        labels = [str(issue_type)]

        metas = self.get_meta(repoId)
        real_assignees = []
        for a in assignees:
            for u in metas["users"]:
                if a == str(u["id"]):
                    real_assignees.append(u["name"])
                    break
        real_labels = ["OpenReplay"]
        for l in labels:
            found = False
            for ll in metas["issueTypes"]:
                if l == str(ll["id"]):
                    real_labels.append(ll["name"])
                    found = True
                    break
            if not found:
                real_labels.append(l)
        issue = self.__client.post(f"/repositories/{repoId}/issues", body={"title": title, "body": description,
                                                                           "assignees": real_assignees,
                                                                           "labels": real_labels})
        return formatter.issue(issue)

    def get_by_ids(self, saved_issues):
        results = []
        for i in saved_issues:
            results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        repoId = integration_project_id
        issueNumber = assignment_id
        issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
        issue = formatter.issue(issue)
        if issue["commentsCount"] > 0:
            issue["comments"] = [formatter.comment(c) for c in
                                 self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
        return issue

    def comment(self, integration_project_id, assignment_id, comment):
        repoId = integration_project_id
        issueNumber = assignment_id
        commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                            body={"body": comment})
        return formatter.comment(commentCreated)

    def get_metas(self, integration_project_id):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)

        return {"provider": self.provider.lower(),
                'users': users,
                'issueTypes': [formatter.label(l) for l in
                               self.__client.get(f"/repositories/{integration_project_id}/labels")]
                }

    def get_projects(self):
        repos = self.__client.get("/user/repos")
        return [formatter.repo(r) for r in repos]
133 api/chalicelib/core/integration_jira_cloud.py Normal file
@@ -0,0 +1,133 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.jira


def obfuscate_string(string):
    return "*" * (len(string) - 4) + string[-4:]


class JIRAIntegration(integration_base.BaseIntegration):
    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        # TODO: enable super-constructor when OAuth is done
        # super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
        self._issue_handler = None
        self._user_id = user_id
        self.integration = self.get()

        if self.integration is None:
            return
        self.integration["valid"] = True
        if not self.integration["url"].endswith('atlassian.net'):
            self.integration["valid"] = False

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        if self.integration["url"].endswith('atlassian.net') and self._issue_handler is None:
            try:
                self._issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
                                                                username=self.integration["username"],
                                                                url=self.integration["url"])
            except Exception as e:
                self._issue_handler = None
                self.integration["valid"] = False
        return self._issue_handler

    # TODO: remove this once jira-oauth is done
    def get(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify(
                    """SELECT username, token, url
                       FROM public.jira_cloud
                       WHERE user_id=%(user_id)s;""",
                    {"user_id": self._user_id})
            )
            data = helper.dict_to_camel_case(cur.fetchone())

            if data is None:
                return
            data["valid"] = True
            if not data["url"].endswith('atlassian.net'):
                data["valid"] = False
            return data

    def get_obfuscated(self):
        if self.integration is None:
            return None
        integration = dict(self.integration)
        integration["token"] = obfuscate_string(self.integration["token"])
        integration["provider"] = self.provider.lower()
        return integration

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.jira_cloud
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING username, token, url;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if obfuscate:
                w["token"] = obfuscate_string(w["token"])
            return self.get()

    # TODO: make this generic for all issue tracking integrations
    def _add(self, data):
        print("a pretty defined abstract method")
        return

    def add(self, username, token, url):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.jira_cloud(username, token, user_id, url)
                            VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s)
                            RETURNING username, token, url;""",
                            {"user_id": self._user_id, "username": username,
                             "token": token, "url": url})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            return self.get()

    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.jira_cloud
                            WHERE user_id=%(user_id)s;""",
                            {"user_id": self._user_id})
            )
            return {"state": "success"}

    def add_edit(self, data):
        if self.integration is not None:
            return self.update(
                changes={
                    "username": data["username"],
                    "token": data["token"] \
                        if data.get("token") and len(data["token"]) > 0 and data["token"].find("***") == -1 \
                        else self.integration["token"],
                    "url": data["url"]
                },
                obfuscate=True
            )
        else:
            return self.add(
                username=data["username"],
                token=data["token"],
                url=data["url"]
            )
56 api/chalicelib/core/integration_jira_cloud_issue.py Normal file
@@ -0,0 +1,56 @@
from chalicelib.utils import jira_client
from chalicelib.core.integration_base_issue import BaseIntegrationIssue


class JIRACloudIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, token, username, url):
        self.username = username
        self.url = url
        self._client = jira_client.JiraManager(self.url, self.username, token, None)
        super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

    def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
        self._client.set_jira_project_id(integration_project_id)
        data = {
            'summary': title,
            'description': description,
            'issuetype': {'id': issue_type},
            'assignee': {"id": assignee},
            "labels": ["OpenReplay"]
        }
        return self._client.create_issue(data)

    def get_by_ids(self, saved_issues):
        projects_map = {}
        for i in saved_issues:
            if i["integrationProjectId"] not in projects_map.keys():
                projects_map[i["integrationProjectId"]] = []
            projects_map[i["integrationProjectId"]].append(i["id"])

        results = []
        for integration_project_id in projects_map:
            self._client.set_jira_project_id(integration_project_id)
            jql = 'labels = OpenReplay'
            if len(projects_map[integration_project_id]) > 0:
                jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
            issues = self._client.get_issues(jql, offset=0)
            results += issues
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        self._client.set_jira_project_id(integration_project_id)
        return self._client.get_issue_v3(assignment_id)

    def comment(self, integration_project_id, assignment_id, comment):
        self._client.set_jira_project_id(integration_project_id)
        return self._client.add_comment_v3(assignment_id, comment)

    def get_metas(self, integration_project_id):
        meta = {}
        self._client.set_jira_project_id(integration_project_id)
        meta['issueTypes'] = self._client.get_issue_types()
        meta['users'] = self._client.get_assignable_users()
        return {"provider": self.provider.lower(), **meta}

    def get_projects(self):
        return self._client.get_projects()
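# Usage sketch (editorial, not part of this change; the credential values are assumptions):
#     handler = JIRACloudIntegrationIssue(token="<api-token>", username="me@example.com",
#                                         url="https://myorg.atlassian.net")
#     handler.get_projects()                      # list Jira projects via JiraManager
#     handler.get_metas(integration_project_id)   # issue types + assignable users for a project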
Some files were not shown because too many files have changed in this diff.