Compare commits
10 commits
main ... tracker-11

| Author | SHA1 | Date |
|---|---|---|
|  | 08084ec0bc |  |
|  | ca7dd4ae46 |  |
|  | 972b15590b |  |
|  | 58d7ac18c9 |  |
|  | 3bfdd9939b |  |
|  | 8f8268d0de |  |
|  | 8581b6e0e9 |  |
|  | 157a364313 |  |
|  | f386ddea11 |  |
|  | 777e6c8aba |  |
4789 changed files with 154907 additions and 227901 deletions
.github/composite-actions/update-keys/action.yml (74 changes, vendored)

@@ -1,74 +0,0 @@
-name: 'Update Keys'
-description: 'Updates keys'
-inputs:
-  domain_name:
-    required: true
-    description: 'Domain Name'
-  license_key:
-    required: true
-    description: 'License Key'
-  jwt_secret:
-    required: true
-    description: 'JWT Secret'
-  jwt_spot_secret:
-    required: true
-    description: 'JWT spot Secret'
-  minio_access_key:
-    required: true
-    description: 'MinIO Access Key'
-  minio_secret_key:
-    required: true
-    description: 'MinIO Secret Key'
-  pg_password:
-    required: true
-    description: 'PostgreSQL Password'
-  registry_url:
-    required: true
-    description: 'Registry URL'
-
-runs:
-  using: "composite"
-  steps:
-    - name: Downloading yq
-      run: |
-        VERSION="v4.42.1"
-        sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
-        sudo chmod +x /usr/bin/yq
-      shell: bash
-
-    - name: "Updating OSS secrets"
-      run: |
-        cd scripts/helmcharts/
-        vars=(
-          "ASSIST_JWT_SECRET:.global.assistJWTSecret"
-          "ASSIST_KEY:.global.assistKey"
-          "DOMAIN_NAME:.global.domainName"
-          "JWT_REFRESH_SECRET:.chalice.env.JWT_REFRESH_SECRET"
-          "JWT_SECRET:.global.jwtSecret"
-          "JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
-          "JWT_SPOT_SECRET:.global.jwtSpotSecret"
-          "LICENSE_KEY:.global.enterpriseEditionLicense"
-          "MINIO_ACCESS_KEY:.global.s3.accessKey"
-          "MINIO_SECRET_KEY:.global.s3.secretKey"
-          "PG_PASSWORD:.postgresql.postgresqlPassword"
-          "REGISTRY_URL:.global.openReplayContainerRegistry"
-        )
-        for var in "${vars[@]}"; do
-          IFS=":" read -r env_var yq_path <<<"$var"
-          yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
-        done
-      shell: bash
-      env:
-        ASSIST_JWT_SECRET: ${{ inputs.assist_jwt_secret }}
-        ASSIST_KEY: ${{ inputs.assist_key }}
-        DOMAIN_NAME: ${{ inputs.domain_name }}
-        JWT_REFRESH_SECRET: ${{ inputs.jwt_refresh_secret }}
-        JWT_SECRET: ${{ inputs.jwt_secret }}
-        JWT_SPOT_REFRESH_SECRET: ${{inputs.jwt_spot_refresh_secret}}
-        JWT_SPOT_SECRET: ${{ inputs.jwt_spot_secret }}
-        LICENSE_KEY: ${{ inputs.license_key }}
-        MINIO_ACCESS_KEY: ${{ inputs.minio_access_key }}
-        MINIO_SECRET_KEY: ${{ inputs.minio_secret_key }}
-        PG_PASSWORD: ${{ inputs.pg_password }}
-        REGISTRY_URL: ${{ inputs.registry_url }}
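Note: the deleted composite action above centralizes secret injection. Each `ENV_VAR:.yaml.path` pair is written into `vars.yaml` through yq v4's `strenv()`, so the secret value stays in the environment instead of being spliced into a shell string. A minimal, self-contained sketch of that pattern (the file contents and values here are made up for illustration; it assumes mikefarah's yq v4, as installed by the action):

```bash
#!/usr/bin/env bash
# Sketch of the env-driven yq update used by the deleted update-keys action.
set -euo pipefail

# Illustrative stand-in for scripts/helmcharts/vars.yaml.
cat > vars.yaml <<'EOF'
global:
  domainName: ""
  jwtSecret: ""
EOF

# Each entry is "ENV_VAR:.yaml.path", exactly like the vars=( ... ) array above.
vars=(
  "DOMAIN_NAME:.global.domainName"
  "JWT_SECRET:.global.jwtSecret"
)

export DOMAIN_NAME="example.openreplay.local"   # stand-ins for the action inputs
export JWT_SECRET="not-a-real-secret"

for var in "${vars[@]}"; do
  IFS=":" read -r env_var yq_path <<<"$var"
  # strenv() reads the named environment variable inside yq, so the secret
  # never appears in the yq expression or the process command line.
  yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done

cat vars.yaml
```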
.github/workflows/alerts-ee.yaml (69 changes, vendored)

@@ -3,9 +3,9 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev

@@ -26,6 +27,7 @@ on:
       - "!ee/api/requirements.txt"
+      - "!ee/api/requirements-crons.txt"

 name: Build and Deploy Alerts EE

 jobs:

@@ -41,25 +42,9 @@ jobs:
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
-
       - name: Docker login
         run: |
           docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:

@@ -72,6 +57,7 @@ jobs:
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

+
       - name: Building and Pushing api image
         id: build-image
         env:

@@ -83,10 +69,10 @@ jobs:
           cd api
           PUSH_IMAGE=0 bash -x ./build_alerts.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("alerts")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {

@@ -106,9 +92,9 @@ jobs:
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
              tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))

@@ -123,16 +109,24 @@ jobs:
       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

+          ## Update secerts
+          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
+          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
+          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
+          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
+          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
+
           # Update changed image tag
           sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,alerts,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

@@ -149,14 +143,13 @@ jobs:
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
       # #  if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
       #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
       #     IMAGE_TAG: ${{ github.sha }}-ee
       #     ENVIRONMENT: staging
       #   with:
       #     limit-access-to-actor: true
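Every build step in this diff shares the same gate: build the image locally, scan it with trivy, and push only when the scan passes or the manual `skip_security_checks` input is set. The workflows express this with `|| { ... } && { ... }` chaining, which as written also echoes the skip message after a clean scan; the sketch below restates the intended logic as an explicit if/else, with a placeholder registry, image name, and tag:

```bash
#!/usr/bin/env bash
# Minimal restatement of the build/scan/push gate shared by these workflows.
set -euo pipefail

skip_security_checks="${1:-false}"
DOCKER_REPO="registry.example.com/foss"   # placeholder registry
IMAGE_TAG="demo"                          # placeholder tag
images=("alerts")

if [[ "$skip_security_checks" == "true" ]]; then
  echo "Skipping Security Checks"
else
  curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz \
    | tar -xzf - -C ./
  for image in "${images[@]}"; do
    # --exit-code 1 fails the scan when HIGH/CRITICAL vulnerabilities remain;
    # --ignore-unfixed drops findings that have no released fix yet.
    ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library \
      --severity "HIGH,CRITICAL" --ignore-unfixed "$DOCKER_REPO/$image:$IMAGE_TAG"
  done
fi

# With set -e, the push is reached only if the scan (when enabled) passed.
for image in "${images[@]}"; do
  docker push "$DOCKER_REPO/$image:$IMAGE_TAG"
done
```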
.github/workflows/alerts.yaml (54 changes, vendored)

@@ -3,9 +3,9 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev

@@ -34,25 +34,9 @@ jobs:
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
-
       - name: Docker login
         run: |
           docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:

@@ -65,6 +49,7 @@ jobs:
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

+
       - name: Building and Pushing Alerts image
         id: build-image
         env:

@@ -76,10 +61,10 @@ jobs:
           cd api
           PUSH_IMAGE=0 bash -x ./build_alerts.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("alerts")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {

@@ -99,9 +84,9 @@ jobs:
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
              tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))

@@ -115,7 +100,7 @@ jobs:
       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

           ## Update secerts
           sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
           sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml

@@ -123,16 +108,15 @@ jobs:
           sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
           sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
           sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

           # Update changed image tag
           sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,alerts,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
         env:
           DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}

@@ -148,13 +132,13 @@ jobs:
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
-      # #  if: ${{ failure() }}
+      #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
-      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
+      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+      #     IMAGE_TAG: ${{ github.sha }}
       #     ENVIRONMENT: staging
       #   with:
       #     limit-access-to-actor: true
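The "Creating old image input" step that appears in both versions derives a helm override file from the cluster's current state, so services not rebuilt by this run keep their already-deployed tags. A standalone sketch of that pipeline (it assumes a reachable cluster and reuses the workflows' `app` namespace and `/foss/` registry filter):

```bash
#!/usr/bin/env bash
# Sketch of the image-override generation used before every deploy.
set -euo pipefail

# Collect every container image in the namespace, one per line, keep the
# /foss/ registry entries, and strip down to "name:tag" with cut.
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

# Each line looks like "chalice:v1.2.3"; split into chart name and tag and
# emit a per-chart helm values block.
for line in $(cat /tmp/image_tag.txt); do
  image_array=($(echo "$line" | tr ':' '\n'))
  cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    tag: ${image_array[1]}
EOF
done

cat /tmp/image_override.yaml
```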
.github/workflows/api-ee.yaml (209 changes, vendored)

@@ -3,13 +3,15 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev
       - api-*
       - v1.11.0-patch
+      - actions_test
     paths:
       - "ee/api/**"
       - "api/**"

@@ -23,6 +25,7 @@ on:
       - "!ee/api/*-dev.sh"
       - "!ee/api/requirements-*.txt"
+

 name: Build and Deploy Chalice EE

 jobs:

@@ -31,129 +34,121 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with old commit
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

       # Caching docker images
       - uses: satackey/action-docker-layer-caching@v0.0.11
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

       - name: Building and Pushing api image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           cd api
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("chalice")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
               exit $err_code
             }
           } && {
             echo "Skipping Security Checks"
           }
           images=("chalice")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
         run: |
           #
           # Create yaml with existing image tags
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
              tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))
             cat <<EOF >> /tmp/image_override.yaml
           ${image_array[0]}:
             image:
               # We've to strip off the -ee, as helm will append it.
               tag: `echo ${image_array[1]} | cut -d '-' -f 1`
           EOF
           done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

+          ## Update secerts
+          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
+          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
+          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
+          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
+          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
+
           # Update changed image tag
           sed -i "/chalice/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,chalice,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           # We're not passing -ee flag, because helm will add that.
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
         if: ${{ failure() }}
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_CHANNEL: ee
           SLACK_TITLE: "Failed ${{ github.workflow }}"
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
-      # #  if: ${{ failure() }}
+      #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
       #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
       #     IMAGE_TAG: ${{ github.sha }}-ee
       #     ENVIRONMENT: staging
       #   with:
       #     limit-access-to-actor: true
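Both sides of the deploy step rely on the same trick: prune the umbrella chart down to the charts this workflow owns, render it offline with `helm template`, and pipe the manifests straight into `kubectl apply`. A sketch under those assumptions (chart names mirror the diff; `vars.yaml` and `/tmp/image_override.yaml` are taken as already prepared by earlier steps):

```bash
#!/usr/bin/env bash
# Sketch of the chart-pruning and template-then-apply deploy pattern.
set -euo pipefail
cd scripts/helmcharts/

# Move the wanted charts aside, wipe the rest, and move them back, so the
# subsequent `helm template` renders nothing but these charts.
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/

# `helm template` renders manifests entirely client-side (no release state
# on the cluster); `kubectl apply -f -` then applies the rendered stream.
helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks \
  | kubectl apply -n app -f -
```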
.github/workflows/api.yaml (213 changes, vendored)

@@ -3,13 +3,14 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev
       - api-*
       - v1.11.0-patch
     paths:
       - "api/**"
       - "!api/.gitignore"

@@ -25,126 +26,118 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with old commit
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

       # Caching docker images
       - uses: satackey/action-docker-layer-caching@v0.0.11
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

       - name: Building and Pushing api image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           cd api
           PUSH_IMAGE=0 bash -x ./build.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("chalice")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
               exit $err_code
             }
           } && {
             echo "Skipping Security Checks"
           }
           images=("chalice")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
         run: |
           #
           # Create yaml with existing image tags
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
              tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))
             cat <<EOF >> /tmp/image_override.yaml
           ${image_array[0]}:
             image:
               tag: ${image_array[1]}
           EOF
           done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

+          ## Update secerts
+          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
+          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
+          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
+          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
+
           # Update changed image tag
           sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,chalice,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
         env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
         if: ${{ failure() }}
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_CHANNEL: foss
           SLACK_TITLE: "Failed ${{ github.workflow }}"
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
       #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
-      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}
+      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+      #     IMAGE_TAG: ${{ github.sha }}-ee
       #     ENVIRONMENT: staging
+      #   with:
+      #     limit-access-to-actor: true
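The tag-bump seds differ only in the number of `n` commands: each `n` advances sed one line past the matched chart name, so `/chalice/{n;n;s/.../}` rewrites the second line below the match, while the EE variants use `{n;n;n;...}` because their override blocks carry an extra comment line above `tag:`. A made-up demo of the two-`n` form:

```bash
#!/usr/bin/env bash
# Demo of the fixed-offset sed edit used to bump image tags in the override
# file. The file below is illustrative, shaped like the generated override.
set -euo pipefail

cat > /tmp/demo_override.yaml <<'EOF'
chalice:
  image:
    tag: old-tag
EOF

IMAGE_TAG="v1.2.3_abc"
# On the line matching "chalice", the two `n` commands step down past
# "image:" to the "tag:" line, which s/.*/.../ then replaces wholesale.
sed -i "/chalice/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/demo_override.yaml
cat /tmp/demo_override.yaml
```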
.github/workflows/assist-ee.yaml (182 changes, vendored)

@@ -3,12 +3,13 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev
       - api-*
     paths:
       - "ee/assist/**"
       - "assist/**"

@@ -23,112 +24,103 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with old commit
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

       - name: Building and Pushing Assist image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           cd assist
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("assist")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
               exit $err_code
             }
           } && {
             echo "Skipping Security Checks"
           }
           images=("assist")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
         run: |
           #
           # Create yaml with existing image tags
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
              tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))
             cat <<EOF >> /tmp/image_override.yaml
           ${image_array[0]}:
             image:
               # We've to strip off the -ee, as helm will append it.
               tag: `echo ${image_array[1]} | cut -d '-' -f 1`
           EOF
           done
       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

+          ## Update secerts
+          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
+          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
+          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
+          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
+          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
+
           # Update changed image tag
           sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,assist,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           # We're not passing -ee flag, because helm will add that.
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       # - name: Debug Job
-      # #  if: ${{ failure() }}
+      #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
       #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
+      #     IMAGE_TAG: ${{ github.sha }}
       #     ENVIRONMENT: staging
       #   with:
       #     iimit-access-to-actor: true
       #
.github/workflows/assist-server-ee.yaml (122 changes, vendored)

@@ -1,122 +0,0 @@
-# This action will push the assist changes to aws
-on:
-  workflow_dispatch:
-    inputs:
-      skip_security_checks:
-        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
-        required: false
-        default: "false"
-  push:
-    branches:
-      - dev
-    paths:
-      - "ee/assist-server/**"
-
-name: Build and Deploy Assist-Server EE
-
-jobs:
-  deploy:
-    name: Deploy
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          # We need to diff with old commit
-          # to see which workers got changed.
-          fetch-depth: 2
-
-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
-
-      - name: Docker login
-        run: |
-          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
-
-      - uses: azure/k8s-set-context@v1
-        with:
-          method: kubeconfig
-          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
-        id: setcontext
-
-      - name: Building and Pushing Assist-Server image
-        id: build-image
-        env:
-          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
-          ENVIRONMENT: staging
-        run: |
-          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-          cd assist-server
-          PUSH_IMAGE=0 bash -x ./build.sh ee
-          [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
-            images=("assist-server")
-            for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
-            done
-            err_code=$?
-            [[ $err_code -ne 0 ]] && {
-              exit $err_code
-            }
-          } && {
-            echo "Skipping Security Checks"
-          }
-          images=("assist-server")
-          for image in ${images[*]};do
-            docker push $DOCKER_REPO/$image:$IMAGE_TAG
-          done
-      - name: Creating old image input
-        run: |
-          #
-          # Create yaml with existing image tags
-          #
-          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
-             tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
-
-          echo > /tmp/image_override.yaml
-
-          for line in `cat /tmp/image_tag.txt`;
-          do
-            image_array=($(echo "$line" | tr ':' '\n'))
-            cat <<EOF >> /tmp/image_override.yaml
-          ${image_array[0]}:
-            image:
-              # We've to strip off the -ee, as helm will append it.
-              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
-          EOF
-          done
-      - name: Deploy to kubernetes
-        run: |
-          pwd
-          cd scripts/helmcharts/
-
-          # Update changed image tag
-          sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
-
-          cat /tmp/image_override.yaml
-          # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
-          rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
-          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
-        env:
-          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-          # We're not passing -ee flag, because helm will add that.
-          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
-          ENVIRONMENT: staging
218
.github/workflows/assist-stats.yaml
vendored
218
.github/workflows/assist-stats.yaml
vendored
|
|
@ -3,9 +3,9 @@ on:
|
|||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
|
||||
required: false
|
||||
default: "false"
|
||||
default: 'false'
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
@ -15,7 +15,7 @@ on:
|
|||
- "!assist-stats/*-dev.sh"
|
||||
- "!assist-stats/requirements-*.txt"
|
||||
|
||||
name: Build and Deploy Assist Stats ee
|
||||
name: Build and Deploy Assist Stats
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
|
|
@ -23,140 +23,118 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing assist-stats image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd assist-stats
|
||||
PUSH_IMAGE=0 bash -x ./build.sh ee
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("assist-stats")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
- name: Building and Pushing assist-stats image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd assist-stats
|
||||
PUSH_IMAGE=0 bash -x ./build.sh ee
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("assist-stats")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("assist-stats")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
|
||||
### Enterprise code deployment
|
||||
### Enterprise code deployment
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontextee
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontextee
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.EE_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.EE_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
- name: Resetting vars file
|
||||
run: |
|
||||
git checkout -- scripts/helmcharts/vars.yaml
|
||||
- name: Deploy to kubernetes ee
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
cat <<EOF>/tmp/image_override.yaml
|
||||
assist-stats:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
|
||||
- name: Deploy to kubernetes ee
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
cat <<EOF>/tmp/image_override.yaml
|
||||
assist-stats:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: ${IMAGE_TAG}
|
||||
EOF
|
||||
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

export IMAGE_TAG=${IMAGE_TAG}
# Update changed image tag
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
# Update changed image tag
sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-stats,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,assist-stats,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,assist-stats,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
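Editor's note: both deploy variants above rely on the same trick — move the charts you actually want to deploy out of the umbrella chart, wipe the rest, and move them back — so `helm template` renders only that subset. A minimal sketch of the pattern, assuming it runs from `scripts/helmcharts/` and using a placeholder chart name:

#!/usr/bin/env bash
set -euo pipefail
# Keep only the charts we intend to render, then apply the output.
# "mychart" and the namespace "app" are illustrative placeholders.
keep=(ingress-nginx mychart)
mkdir -p /tmp/charts
for c in "${keep[@]}"; do mv "openreplay/charts/$c" /tmp/charts/; done
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml --no-hooks | kubectl apply -f -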
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
184 .github/workflows/assist.yaml vendored
@ -3,12 +3,13 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
paths:
- "assist/**"
- "!assist/.gitignore"
@ -22,112 +23,103 @@ jobs:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
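Editor's note: `fetch-depth: 2` pulls the previous commit along with HEAD, which is the minimum needed to diff the two. A sketch of the kind of check this enables (the path filter is illustrative):

#!/usr/bin/env bash
set -euo pipefail
# With fetch-depth: 2, HEAD^ exists, so we can list what the
# triggering commit changed and skip work for untouched services.
if git diff --name-only HEAD^ HEAD | grep -q '^assist/'; then
  echo "assist changed; rebuild it"
fi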
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
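Editor's note: in the trivy invocation above, `--exit-code 1` turns findings into a failed step, `--ignore-unfixed` suppresses vulnerabilities with no released fix, and the repeated `--db-repository` values appear to supply a fallback mirror for the vulnerability database. A standalone sketch of the same gate, with a placeholder image name:

#!/usr/bin/env bash
set -euo pipefail
# Fail the pipeline only on fixable HIGH/CRITICAL findings.
image="registry.example.com/app:latest"   # placeholder
./trivy image \
  --exit-code 1 \
  --severity "HIGH,CRITICAL" \
  --ignore-unfixed \
  "$image" || { echo "vulnerabilities found; aborting"; exit 1; }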
- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
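Editor's note: the step above snapshots the image tags currently running in the cluster so the subsequent helm render does not bump services that were not rebuilt. A sketch of that harvesting, assuming the `app` namespace and a `/foss/` registry path segment:

#!/usr/bin/env bash
set -euo pipefail
# List every container image in the namespace, one per line,
# then keep only entries from the /foss/ registry path.
kubectl get pods -n app \
  -o jsonpath='{.items[*].spec.containers[*].image}' |
  tr -s '[:space:]' '\n' | sort -u | grep '/foss/' |
  cut -d '/' -f3 > /tmp/image_tag.txt   # leaves "name:tag" pairs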
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
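Editor's note: the `sed "/assist/{n;n;n;s/.../}"` idiom above edits the third line after the matched key, which is where `tag:` sits in the generated override file; it silently breaks if the layout shifts by one line. A self-contained sketch against a hypothetical override file:

#!/usr/bin/env bash
set -euo pipefail
# Build a tiny override file, then retarget the tag three lines
# below the chart name, exactly like the workflow does.
cat > /tmp/demo_override.yaml <<'EOF'
assist:
  image:
    # comment line
    tag: old
EOF
sed -i "/assist/{n;n;n;s/.*/    tag: v1.2.3/}" /tmp/demo_override.yaml
cat /tmp/demo_override.yaml   # tag is now v1.2.3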
# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
#
210 .github/workflows/crons-ee.yaml vendored
@ -3,13 +3,14 @@ on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
required: false
default: "false"
default: 'false'
push:
branches:
- dev
- api-*
- v1.11.0-patch
paths:
- "ee/api/**"
- "api/**"
@ -34,126 +35,121 @@ jobs:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build_crons.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("crons")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=0 bash -x ./build_crons.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("crons")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
env:
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
image: &image
tag: "${IMAGE_TAG}"
utilities:
apiCrons:
assiststats:
image: *image
report:
image: *image
sessionsCleaner:
image: *image
projectsStats:
image: *image
fixProjectsStats:
image: *image
EOF
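Editor's note: the heredoc above uses a YAML anchor (`&image`) and aliases (`*image`) so the tag is written once and reused by every cron entry. A minimal sketch of the same idea, with hypothetical job names, verified with the mikefarah yq these workflows already install:

#!/usr/bin/env bash
set -euo pipefail
# One anchored mapping (&image) reused via aliases (*image).
cat > /tmp/anchors.yaml <<'EOF'
image: &image
  tag: "v1.2.3"
jobs:
  reporter:
    image: *image
  cleaner:
    image: *image
EOF
# explode(.) materializes aliases before navigation.
yq 'explode(.) | .jobs.reporter.image.tag' /tmp/anchors.yaml   # -> v1.2.3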
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("crons")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
echo > /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,utilities,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
ENVIRONMENT: staging
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

# Update changed image tag
sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,utilities,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,utilities,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
#
57 .github/workflows/db-migrate.yaml vendored
@ -59,22 +59,17 @@ jobs:
EOF
done

- uses: ./.github/composite-actions/update-keys
with:
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Deploy to kubernetes foss
if: ${{ steps.check-migration.outputs.skip_migration_oss != 'true' }}
run: |
cd scripts/helmcharts/

## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

cat /tmp/image_override.yaml
@ -120,21 +115,22 @@ jobs:
EOF
done

- uses: ./.github/composite-actions/update-keys
with:
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys

- name: Resetting vars file
run: |
git checkout -- scripts/helmcharts/vars.yaml
- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/

## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --atomic --set forceMigration=true --set dbMigrationUpstreamBranch=${IMAGE_TAG}
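Editor's note: unlike the `helm template | kubectl apply` pattern used elsewhere in these workflows, this path runs a real Helm release: `--install` creates it on first run and `--atomic` rolls the release back automatically if any resource or hook fails. A minimal sketch of the same invocation shape, with placeholder release and chart names:

#!/usr/bin/env bash
set -euo pipefail
# Real release with rollback-on-failure, as opposed to
# rendering manifests and applying them by hand.
helm upgrade --install myrelease ./mychart \
  -n app \
  -f vars.yaml \
  --atomic \
  --set forceMigration=true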
@ -144,13 +140,12 @@ jobs:
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
130 .github/workflows/frontend-dev.yaml vendored
@ -1,7 +1,7 @@
name: Frontend Dev Deployment
on: workflow_dispatch
# Disable previous workflows for this action.
concurrency:
group: ${{ github.workflow }} #-${{ github.ref }}
cancel-in-progress: true
@ -9,77 +9,73 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Cache node modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.OS }}-build-
${{ runner.OS }}-
- uses: ./.github/composite-actions/update-keys
with:
domain_name: ${{ secrets.DEV_DOMAIN_NAME }}
license_key: ${{ secrets.DEV_LICENSE_KEY }}
jwt_secret: ${{ secrets.DEV_JWT_SECRET }}
minio_access_key: ${{ secrets.DEV_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.DEV_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.DEV_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.DEV_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing frontend image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
- name: Building and Pushing frontend image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
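Editor's note: both variants run the yarn build inside a throwaway Node container; mounting /etc/passwd and passing the host UID/GID keeps the files it writes owned by the CI user rather than root. A sketch of that pattern, assuming a project in the current directory:

#!/usr/bin/env bash
set -euo pipefail
# Build with the host's identity so node_modules/ and dist/
# don't come back root-owned on the runner.
docker run --rm \
  -u "$(id -u):$(id -g)" \
  -v /etc/passwd:/etc/passwd:ro \
  -v "$(pwd):/work" -w /work \
  node:20-slim \
  bash -c "yarn install --frozen-lockfile && yarn build"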
- name: Deploy to kubernetes foss
run: |
cd scripts/helmcharts/
set -x
cat <<EOF>>/tmp/image_override.yaml
frontend:
image:
tag: ${IMAGE_TAG}
EOF
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.DEV_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.DEV_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.DEV_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.DEV_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.DEV_DOMAIN_NAME }}\"/g" vars.yaml

# Update changed image tag
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
iMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
234 .github/workflows/frontend.yaml vendored
@ -1,4 +1,4 @@
name: Frontend Foss Deployment
on:
workflow_dispatch:
push:
@ -7,7 +7,7 @@ on:
paths:
- frontend/**
# Disable previous workflows for this action.
concurrency:
group: ${{ github.workflow }} #-${{ github.ref }}
cancel-in-progress: true
@ -15,145 +15,129 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Cache node modules
uses: actions/cache@v4
with:
path: |
/home/runner/work/openreplay/openreplay/frontend/node_modules
/home/runner/work/openreplay/openreplay/frontend/.yarn
key: ${{ runner.OS }}-build-${{ hashFiles('frontend/yarn.lock') }}
restore-keys: |
${{ runner.OS }}-build-
${{ runner.OS }}-
- name: Cache node modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.OS }}-build-
${{ runner.OS }}-
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing frontend image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
- name: Building and Pushing frontend image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
- name: Deploy to kubernetes foss
run: |
cd scripts/helmcharts/
set -x
cat <<EOF>>/tmp/image_override.yaml
frontend:
image:
tag: ${IMAGE_TAG}
EOF
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

# Update changed image tag
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
### Enterprise code deployment

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontextee
- name: Resetting vars file
run: |
git checkout -- scripts/helmcharts/vars.yaml
- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
frontend:
image:
# We have to strip off the -ee, as helm will append it.
tag: ${IMAGE_TAG}
EOF
# Update changed image tag
sed -i "/frontend/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,frontend,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# Update changed image tag
sed -i "/frontend/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
189 .github/workflows/patch-build-old.yaml vendored
@ -1,189 +0,0 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

on:
workflow_dispatch:
inputs:
services:
description: 'Comma separated names of services to build (in small letters).'
required: true
default: 'chalice,frontend'
tag:
description: 'Tag to update.'
required: true
type: string
branch:
description: 'Branch to build patches from. Make sure the branch is up to date with the tag, else it will cause missing commits.'
required: true
type: string

name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
jobs:
deploy:
name: Build Patch from old tag
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 4
ref: ${{ github.event.inputs.tag }}
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

- name: Create backup tag with timestamp
run: |
set -e # Exit immediately if a command exits with a non-zero status
TIMESTAMP=$(date +%Y%m%d%H%M%S)
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
echo "Created backup tag: $BACKUP_TAG"

# Get the oldest commit date from the last 3 commits in raw format
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
# Add 1 second to the timestamp
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
- name: Setup yq
uses: mikefarah/yq@master

# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}

- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

- uses: depot/setup-action@v1
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
- name: Build
id: build-image
env:
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
cd openreplay && git fetch origin && git checkout $INPUT_TAG
git log -1
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
BUILD_SCRIPT_NAME="build.sh"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
done
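Editor's note: `image_version` above bumps the last component of a dotted version by letting awk treat the dots as field separators. A standalone sketch of that increment:

#!/usr/bin/env bash
set -euo pipefail
# Bump 1.2.9 -> 1.2.10 by incrementing the final field.
current_version="1.2.9"
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$new_version"   # 1.2.10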
- name: Change commit timestamp
run: |
# Convert the timestamp to a date format git can understand
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
echo "Setting commit date to: $NEW_DATE"

# Amend the commit with the new date
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"

# Verify the change
git log -1 --pretty=format:"Commit now dated: %cD"

# git tag and push
git tag $INPUT_TAG -f
git push origin $INPUT_TAG -f
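Editor's note: git stores two timestamps per commit — the author date (set via `--date`) and the committer date (taken from `GIT_COMMITTER_DATE`). Rewriting both, as the step above does, is what makes the amended commit sort where the workflow wants it. A minimal sketch:

#!/usr/bin/env bash
set -euo pipefail
# Rewrite both timestamps on the tip commit to a fixed moment.
NEW_DATE="Mon Jan 1 00:00:01 2024 +0000"   # illustrative value
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
git log -1 --pretty=format:"author %ad / committer %cd"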
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
# MSAAS_REPO_FOLDER: /tmp/msaas
# with:
# limit-access-to-actor: true
261 .github/workflows/patch-build.yaml vendored
@ -1,261 +0,0 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

on:
workflow_dispatch:
inputs:
services:
description: 'Comma separated names of services to build (in small letters).'
required: true
default: 'chalice,frontend'

name: Build patches from main branch, Raise PR to Main, and Push to tag
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Rebase with main branch, to make sure the code has the latest main changes
if: github.ref != 'refs/heads/main'
run: |
git remote -v
git config --global user.email "action@github.com"
git config --global user.name "GitHub Action"
git config --global rebase.autoStash true
git fetch origin main:main
git rebase main
git log -3
- name: Downloading yq
run: |
VERSION="v4.42.1"
sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
sudo chmod +x /usr/bin/yq

# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}

- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

- uses: depot/setup-action@v1
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=patch/main/${HEAD_COMMIT_ID}" >> $GITHUB_ENV
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Build
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
SERVICES_INPUT: ${{ github.event.inputs.services }}
|
||||
run: |
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
readonly WORKING_DIR=$(pwd)
|
||||
readonly BUILD_SCRIPT_NAME="build.sh"
|
||||
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
|
||||
|
||||
# Initialize git configuration
|
||||
setup_git() {
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
}
|
||||
|
||||
# Get and increment image version
|
||||
image_version() {
|
||||
local service=$1
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
local current_version new_version
|
||||
|
||||
current_version=$(yq eval '.AppVersion' "$chart_path")
|
||||
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
|
||||
echo "$new_version"
|
||||
}
|
||||

          # Clone MSAAS repository if not exists
          clone_msaas() {
            if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
              git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
              cd "$MSAAS_REPO_FOLDER"
              cd openreplay && git fetch origin && git checkout main
              git log -1
              cd "$MSAAS_REPO_FOLDER"
              bash git-init.sh
              git checkout
            fi
          }

          # Build managed services
          build_managed() {
            local service=$1
            local version=$2

            echo "Building managed service: $service"
            clone_msaas

            if [[ $service == 'chalice' ]]; then
              cd "$MSAAS_REPO_FOLDER/openreplay/api"
            else
              cd "$MSAAS_REPO_FOLDER/openreplay/$service"
            fi

            local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"

            echo "Executing: $build_cmd"
            if ! eval "$build_cmd" 2>&1; then
              echo "Build failed for $service"
              exit 1
            fi
          }

          # Build service with given arguments
          build_service() {
            local service=$1
            local version=$2
            local build_args=$3
            local build_script=${4:-$BUILD_SCRIPT_NAME}

            local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
            echo "Executing: $command"
            eval "$command"
          }

          # Update chart version and commit changes
          update_chart_version() {
            local service=$1
            local version=$2
            local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"

            # Ensure we're in the original working directory/repository
            cd "$WORKING_DIR"
            yq eval ".AppVersion = \"$version\"" -i "$chart_path"
            git add "$chart_path"
            git commit -m "Increment $service chart version to $version"
            git push --set-upstream origin "$BRANCH_NAME"
            cd -
          }
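          # update_chart_version relies on yq's in-place edit mode; a standalone
          # equivalent (hypothetical service and version) would be:
          #   yq eval '.AppVersion = "1.22.5"' -i scripts/helmcharts/openreplay/charts/chalice/Chart.yaml
          # Shell interpolation fills in $version before yq parses the expression,
          # so the strenv() indirection used elsewhere is not needed here.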

          # Main execution
          main() {
            setup_git

            # Get backend services list
            ls backend/cmd >"$BACKEND_SERVICES_FILE"

            # Parse services input (fix for GitHub Actions syntax)
            echo "Services: ${SERVICES_INPUT:-$1}"
            IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"

            # Process each service
            for service in "${services[@]}"; do
              echo "Processing service: $service"
              cd "$WORKING_DIR"

              local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"

              # Determine build configuration based on service type
              if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
                # Backend service
                cd backend
                foss_build_args="nil $service"
                ee_build_args="ee $service"
              else
                # Non-backend service
                case "$service" in
                  chalice | alerts | crons)
                    cd "$WORKING_DIR/api"
                    ;;
                  *)
                    cd "$service"
                    ;;
                esac

                # Special build scripts for alerts/crons
                if [[ $service == 'alerts' || $service == 'crons' ]]; then
                  build_script="build_${service}.sh"
                fi

                ee_build_args="ee"
              fi

              # Get version and build
              local version
              version=$(image_version "$service")

              # Build FOSS and EE versions
              build_service "$service" "$version" "$foss_build_args"
              build_service "$service" "${version}-ee" "$ee_build_args"

              # Build managed version for specific services
              if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
                echo "Nothing to build in managed for service $service"
              else
                build_managed "$service" "$version"
              fi

              # Update chart and commit
              update_chart_version "$service" "$version"
            done
            cd "$WORKING_DIR"

            # Cleanup
            rm -f "$BACKEND_SERVICES_FILE"
          }

          echo "Working directory: $WORKING_DIR"
          # Run main function with all arguments
          main "$SERVICES_INPUT"

      - name: Create Pull Request
        uses: repo-sync/pull-request@v2
        with:
          github_token: ${{ secrets.ACTIONS_COMMMIT_TOKEN }}
          source_branch: ${{ env.BRANCH_NAME }}
          destination_branch: "main"
          pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
          pr_body: |
            This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
            Once this PR is merged, the tag update job will run automatically.

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true
.github/workflows/peers-ee.yaml (vendored, new file, 143 lines)

@@ -0,0 +1,143 @@
# This action will push the peers changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
        required: false
        default: 'false'
  push:
    branches:
      - dev
      - api-*
    paths:
      - "ee/peers/**"
      - "peers/**"
      - "!peers/.gitignore"
      - "!peers/*-dev.sh"

name: Build and Deploy Peers EE

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with the old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing peers image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd peers
          PUSH_IMAGE=0 bash -x ./build.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("peers")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("peers")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
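A note on the scan gating above: because `a || b && c` associates as `(a || b) && c` in bash, the "Skipping Security Checks" message prints both when the flag is true and when the scan block succeeds, so that log line alone does not prove the scan was skipped. A more explicit sketch of the presumed intent (same trivy flags, hypothetical image name):

    if [[ "$skip_security_checks" == "true" ]]; then
      echo "Skipping Security Checks"
    else
      # --exit-code 1 makes trivy fail the job on HIGH/CRITICAL findings
      ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library \
        --severity "HIGH,CRITICAL" --ignore-unfixed "$DOCKER_REPO/peers:$IMAGE_TAG"
    fi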
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
            tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done
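For reference, each image recorded in /tmp/image_tag.txt becomes an override entry of this shape (hypothetical chart name and tag), which `helm template -f /tmp/image_override.yaml` later merges over the chart defaults:

    peers:
      image:
        tag: v1.22.0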
      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/peers/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,peers,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,peers,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
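The `sed "/peers/{n;n;n;s/.../}"` address in the step above is worth unpacking: it matches the `peers:` line, advances three lines (`n;n;n`, past `image:` and the comment to the `tag:` line written earlier), and rewrites that whole line. A minimal reproduction (GNU sed, hypothetical file):

    printf 'peers:\n  image:\n    # comment\n    tag: old\n' > /tmp/demo.yaml
    sed -i "/peers/{n;n;n;s/.*/    tag: new-tag/}" /tmp/demo.yaml
    cat /tmp/demo.yaml   # the last line now reads:    tag: new-tag

The OSS variant of this workflow uses only `n;n` because its override entries carry no comment line.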

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: ee
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: 'Build failed :bomb:'

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #
.github/workflows/peers.yaml (vendored, new file, 140 lines)

@@ -0,0 +1,140 @@
# This action will push the peers changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
        required: false
        default: 'false'
  push:
    branches:
      - dev
      - api-*
    paths:
      - "peers/**"
      - "!peers/.gitignore"
      - "!peers/*-dev.sh"

name: Build and Deploy Peers

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with the old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing peers image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd peers
          PUSH_IMAGE=0 bash -x ./build.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("peers")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("peers")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
            tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              tag: ${image_array[1]}
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/peers/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,peers,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,peers,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: foss
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: 'Build failed :bomb:'

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
.github/workflows/pr-env-delete.yaml (vendored, 2 lines changed)

@@ -83,4 +83,4 @@ jobs:
             ]
           }
           EOF
-          iws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json
+          aws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json
.github/workflows/pr-env.yaml (vendored, 16 lines changed)

@@ -329,12 +329,10 @@ jobs:
       #   run: |
       #     # Add any cleanup commands if necessary

-      # - name: Debug Job
-      #   # if: ${{ failure() }}
-      #   uses: mxschmitt/action-tmate@v3
-      #   env:
-      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
-      #     ENVIRONMENT: staging
-      #   with:
-      #     iimit-access-to-actor: true
+      - name: Debug Job
+        if: failure()
+        uses: mxschmitt/action-tmate@v3
+        env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+          IMAGE_TAG: ${{ github.sha }}
+          ENVIRONMENT: staging
.github/workflows/release-deployment.yaml (vendored, deleted, 103 lines)

@@ -1,103 +0,0 @@
name: Release Deployment

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
        required: true
      branch:
        description: 'Branch to deploy (defaults to dev)'
        required: false
        default: 'dev'

env:
  IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
  DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
  DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.branch }}
      - name: Docker login
        run: |
          docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - name: Set image tag with branch info
        run: |
          SHORT_SHA=$(git rev-parse --short HEAD)
          echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
          echo "Using image tag: $IMAGE_TAG"
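One caveat in the step above: values appended to `$GITHUB_ENV` only become environment variables in subsequent steps, so `echo "Using image tag: $IMAGE_TAG"` prints an empty value within this same step. A sketch that keeps a shell variable for immediate logging (same tag scheme):

    SHORT_SHA=$(git rev-parse --short HEAD)
    IMAGE_TAG="${{ github.event.inputs.branch }}-${SHORT_SHA}"  # visible immediately in this step
    echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV"              # exported to later steps
    echo "Using image tag: ${IMAGE_TAG}"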

      - uses: depot/setup-action@v1

      - name: Build and push Docker images
        run: |
          # Parse the comma-separated services list into an array
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          working_dir=$(pwd)

          # Define backend services (consider moving this to workflow inputs or repo config)
          ls backend/cmd >> /tmp/backend.txt
          BUILD_SCRIPT_NAME="build.sh"

          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd $working_dir/backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              cd $working_dir
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            {
              echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            }&
            {
              echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
              IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            }&
          done
          wait
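The `{ ... }&` groups above run the FOSS and EE builds of each service concurrently, and the single `wait` blocks until every background job exits. Note that a bare `wait` discards the jobs' exit codes, so a failed build would not fail this step; a sketch that propagates the first failure (hypothetical service list):

    pids=()
    for svc in chalice frontend; do       # hypothetical services
      bash build.sh "$svc" & pids+=($!)   # launch each build in the background
    done
    for pid in "${pids[@]}"; do
      wait "$pid" || exit 1               # surface the first failing build
    done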

      - uses: azure/k8s-set-context@v1
        name: Using ee release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}

      - name: Deploy to ee release Kubernetes
        run: |
          echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
          done
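`kubectl set image` only patches the container image of an existing Deployment and lets the rolling update proceed asynchronously, so the step succeeds even if the rollout later fails. A variant that waits for the rollout (hypothetical names, same namespace):

    kubectl set image deployment/chalice-openreplay -n app \
      chalice="${IMAGE_REGISTRY_URL}/chalice:${IMAGE_TAG}-ee"
    kubectl rollout status deployment/chalice-openreplay -n app --timeout=300s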

      - uses: azure/k8s-set-context@v1
        name: Using foss release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}

      - name: Deploy to FOSS release Kubernetes
        run: |
          echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
          done
.github/workflows/sourcemaps-reader-ee.yaml (vendored, 212 lines changed)

@@ -1,21 +1,21 @@
-# This action will push the sourcemapreader changes to ee
+# This action will push the sourcemapreader changes to aws
 on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev
       - api-*
     paths:
       - "ee/sourcemap-reader/**"
       - "sourcemap-reader/**"
       - "!sourcemap-reader/.gitignore"
       - "!sourcemap-reader/*-dev.sh"

-name: Build and Deploy sourcemap-reader EE
+name: Build and Deploy sourcemap-reader

 jobs:
   deploy:

@@ -23,128 +23,120 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with the old commit
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

       # Caching docker images
       - uses: satackey/action-docker-layer-caching@v0.0.11
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

       - name: Building and Pushing sourcemaps-reader image
         id: build-image
         env:
-          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           cd sourcemap-reader
           PUSH_IMAGE=0 bash -x ./build.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("sourcemaps-reader")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
               exit $err_code
             }
           } && {
             echo "Skipping Security Checks"
           }
           images=("sourcemaps-reader")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
         run: |
           #
           # Create yaml with existing image tags
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
             tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))
             cat <<EOF >> /tmp/image_override.yaml
           ${image_array[0]}:
             image:
               tag: ${image_array[1]}
           EOF
           done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

           # Update changed image tag
           sed -i "/sourcemaps-reader/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
           sed -i "s/sourcemaps-reader/sourcemapreader/g" /tmp/image_override.yaml
-          ## Update secrets
-          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
-          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
-          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
-          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
-          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
-          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
-          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,sourcemapreader,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
         if: ${{ failure() }}
         uses: rtCamp/action-slack-notify@v2
         env:
-          SLACK_CHANNEL: ee
+          SLACK_CHANNEL: foss
           SLACK_TITLE: "Failed ${{ github.workflow }}"
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
-      #   # if: ${{ failure() }}
+      #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
       #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
+      #     IMAGE_TAG: ${{ github.sha }}
       #     ENVIRONMENT: staging
       #   with:
       #     limit-access-to-actor: true
.github/workflows/sourcemaps-reader.yaml (vendored, 208 lines changed)

@@ -3,12 +3,13 @@ on:
   workflow_dispatch:
     inputs:
       skip_security_checks:
-        description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
+        description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
         required: false
-        default: "false"
+        default: 'false'
   push:
     branches:
       - dev
       - api-*
     paths:
       - "sourcemap-reader/**"
       - "!sourcemap-reader/.gitignore"

@@ -22,128 +23,119 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with the old commit
           # to see which workers got changed.
           fetch-depth: 2

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

       # Caching docker images
       - uses: satackey/action-docker-layer-caching@v0.0.11
         # Ignore the failure of a step and avoid terminating the job.
         continue-on-error: true

       - name: Building and Pushing sourcemaps-reader image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           cd sourcemap-reader
           PUSH_IMAGE=0 bash -x ./build.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("sourcemaps-reader")
             for image in ${images[*]};do
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
               exit $err_code
             }
           } && {
             echo "Skipping Security Checks"
           }
           images=("sourcemaps-reader")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
         run: |
           #
           # Create yaml with existing image tags
           #
           kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
             tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

           echo > /tmp/image_override.yaml

           for line in `cat /tmp/image_tag.txt`;
           do
             image_array=($(echo "$line" | tr ':' '\n'))
             cat <<EOF >> /tmp/image_override.yaml
           ${image_array[0]}:
             image:
               tag: ${image_array[1]}
           EOF
           done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

           # Update changed image tag
           sed -i "/sourcemaps-reader/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
           sed -i "s/sourcemaps-reader/sourcemapreader/g" /tmp/image_override.yaml
-          ## Update secrets
-          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
-          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
-          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
-          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
-          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
-          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

           cat /tmp/image_override.yaml
           # Deploy command
-          mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit} /tmp
           rm -rf openreplay/charts/*
-          mv /tmp/charts/* openreplay/charts/
+          mv /tmp/{ingress-nginx,sourcemapreader,quickwit} openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
         env:
           DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
         if: ${{ failure() }}
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_CHANNEL: foss
           SLACK_TITLE: "Failed ${{ github.workflow }}"
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
+          SLACK_MESSAGE: 'Build failed :bomb:'

       # - name: Debug Job
-      #   # if: ${{ failure() }}
+      #   if: ${{ failure() }}
       #   uses: mxschmitt/action-tmate@v3
       #   env:
-      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
+      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #   with:
      #     limit-access-to-actor: true
.github/workflows/tracker-tests.yaml (vendored, 12 lines changed)

@@ -9,16 +9,24 @@ on:
   pull_request:
     branches: [ "dev", "main" ]
     paths:
       - frontend/**
       - tracker/**
 jobs:
   build-and-test:
     runs-on: macos-latest
     name: Build and test Tracker
     strategy:
       matrix:
         node-version: [ 18.x ]
     steps:
-      - uses: oven-sh/setup-bun@v2
+      - uses: oven-sh/setup-bun@v1
         with:
           bun-version: latest
       - uses: actions/checkout@v3
       - name: Use Node.js ${{ matrix.node-version }}
         uses: actions/setup-node@v3
         with:
           node-version: ${{ matrix.node-version }}
       - name: Cache tracker modules
         uses: actions/cache@v3
         with:

@@ -64,4 +72,4 @@ jobs:
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
           flags: tracker
-          iame: tracker
+          name: tracker
.github/workflows/ui-tests.js.yml (vendored, 11 lines changed)

@@ -27,9 +27,9 @@ jobs:
     name: Build and test Tracker plus Replayer
     strategy:
       matrix:
-        node-version: [ 20.x ]
+        node-version: [ 18.x ]
     steps:
-      - uses: oven-sh/setup-bun@v2
+      - uses: oven-sh/setup-bun@v1
         with:
           bun-version: latest
       - uses: actions/checkout@v3

@@ -102,11 +102,11 @@ jobs:
       - name: Setup packages
         run: |
           cd frontend
-          yarn
+          npm install --legacy-peer-deps
       - name: Run unit tests
         run: |
           cd frontend
-          yarn test:ci
+          npm run test:ci
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3
         with:

@@ -138,13 +138,12 @@ jobs:
       - name: (Chrome) Run visual tests
         run: |
           cd frontend
-          yarn cy:test
+          npm run cy:test
         # firefox has a different viewport somehow
         # - name: (Firefox) Run visual tests
         #   run: yarn cy:test-firefox
         # - name: (Edge) Run visual tests
         #   run: yarn cy:test-edge
         timeout-minutes: 5
       - name: Upload Debug
         if: ${{ failure() }}
         uses: actions/upload-artifact@v3
.github/workflows/update-tag.yaml (vendored, deleted, 42 lines)

@@ -1,42 +0,0 @@
on:
  pull_request:
    types: [closed]
    branches:
      - main
name: Release tag update --force

jobs:
  deploy:
    name: Build Patch from main
    runs-on: ubuntu-latest
    if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Get latest release tag using GitHub API
        id: get-latest-tag
        run: |
          LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            "https://api.github.com/repos/${{ github.repository }}/releases/latest" \
            | jq -r .tag_name)

          # Fail if the API doesn't return a tag
          if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
            echo "Latest tag not found"
            exit 100
          fi

          echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
          echo "Latest tag: $LATEST_TAG"

      - name: Set Remote with GITHUB_TOKEN
        run: |
          git config --unset http.https://github.com/.extraheader
          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}

      - name: Push main branch to tag
        run: |
          git checkout main
          echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
          git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
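The closing push rewrites an existing release tag in place: `HEAD:refs/tags/<tag> --force` points the remote tag at the current tip of main without creating a new ref. The local equivalent (hypothetical tag name):

    git tag -f v1.22.0                 # move the local tag to HEAD
    git push origin v1.22.0 --force    # overwrite the remote tag

One consequence worth remembering: clients that already fetched the old tag will not pick up the moved one automatically, since git deliberately refuses to update changed tags on fetch.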
.github/workflows/workers-ee.yaml (vendored, 290 lines changed)

@@ -6,14 +6,14 @@ on:
     build_service:
       description: 'Name of a single service to build (in small letters). "all" to build everything'
       required: false
-      default: "false"
+      default: 'false'
     skip_security_checks:
-      description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
+      description: 'Skip Security checks if there is an unfixable vuln or error. Value: true/false'
       required: false
-      default: "false"
+      default: 'false'
   push:
     branches:
-      - dev
+      - dev
     paths:
       - ee/backend/**
       - backend/**

@@ -26,168 +26,154 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           # We need to diff with the old commit
           # to see which workers got changed.
           fetch-depth: 2
           # ref: staging

-      - uses: ./.github/composite-actions/update-keys
-        with:
-          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
-          assist_key: ${{ secrets.ASSIST_KEY }}
-          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
-          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
-          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
-          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
-          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
-          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
-          pg_password: ${{ secrets.EE_PG_PASSWORD }}
-          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
-        name: Update Keys
       - name: Docker login
         run: |
           docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

       - uses: azure/k8s-set-context@v1
         with:
           method: kubeconfig
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

-      - name: Downloading yq
-        run: |
-          VERSION="v4.42.1"
-          sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
-          sudo chmod +x /usr/bin/yq
       # # Caching docker images
       # - uses: satackey/action-docker-layer-caching@v0.0.11
       #   # Ignore the failure of a step and avoid terminating the job.
       #   continue-on-error: true

       - name: Build, tag
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
           ENVIRONMENT: staging
         run: |
           #
           # TODO: Check whether the container tags are the same, then skip the build and deployment.
           #
           # Build a docker container and push it to the Docker Registry so that it can be deployed to the Kubernetes cluster.
           #
           # Getting the images to build
           #
           set -x
           touch /tmp/images_to_build.txt
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
           tmp_param=${{ github.event.inputs.build_service }}
           build_param=${tmp_param:-'false'}
           case ${build_param} in
             false)
               {
                 git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
                   grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
                 done
               } | awk '!seen[$0]++' > /tmp/images_to_build.txt
               ;;
             all)
               ls backend/cmd > /tmp/images_to_build.txt
               ;;
             *)
               echo ${{ github.event.inputs.build_service }} > /tmp/images_to_build.txt
               ;;
           esac

           if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
             echo "Nothing to build here"
             touch /tmp/nothing-to-build-here
             exit 0
           fi
           #
           # Pushing image to registry
           #
           cd backend
           cat /tmp/images_to_build.txt
           for image in $(cat /tmp/images_to_build.txt);
           do
             echo "Building $image"
             PUSH_IMAGE=0 bash -x ./build.sh ee $image
             [[ "x$skip_security_checks" == "xtrue" ]] || {
-              curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
-              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+              ./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
               err_code=$?
               [[ $err_code -ne 0 ]] && {
                 exit $err_code
               }
             } && {
               echo "Skipping Security Checks"
             }
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
             echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
           done
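The `false)` branch above is the interesting one: it derives the set of services to rebuild from the last commit. `git diff --name-only HEAD HEAD~1` lists changed files, the greps keep `backend/pkg` and `backend/internal` paths outside `ee/`, `cut -d '/' -f3` extracts the package name, and a recursive grep finds every service that imports it; `awk '!seen[$0]++'` then deduplicates while preserving order. A stripped-down reproduction (hypothetical changed file):

    echo "backend/pkg/queue/consumer.go" |          # stand-in for the git diff output
      grep -E "backend/pkg|backend/internal" |
      cut -d '/' -f3 |                              # -> queue
      while read -r pkg_name; do
        grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
      done | awk '!seen[$0]++'                      # each dependent service listed once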

       - name: Deploying to kubernetes
         env:
+          # We're not passing the -ee flag, because helm will add that.
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         run: |
           #
           # Deploying image to environment.
           #
           set -x
           [[ -f /tmp/nothing-to-build-here ]] && exit 0
           cd scripts/helmcharts/

           set -x
           echo > /tmp/image_override.yaml
           mkdir /tmp/helmcharts
           mv openreplay/charts/ingress-nginx /tmp/helmcharts/
           mv openreplay/charts/quickwit /tmp/helmcharts/
-          mv openreplay/charts/connector /tmp/helmcharts/
           ## Update images
           for image in $(cat /tmp/images_to_build.txt);
           do
             mv openreplay/charts/$image /tmp/helmcharts/
             cat <<EOF>>/tmp/image_override.yaml
           ${image}:
             image:
               # We've to strip off the -ee, as helm will append it.
               tag: ${IMAGE_TAG}
           EOF
           done
           ls /tmp/helmcharts
           rm -rf openreplay/charts/*
           ls openreplay/charts
           mv /tmp/helmcharts/* openreplay/charts/
           ls openreplay/charts
-          ## Update secrets
-          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
-          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
-          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
-          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
-          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
-          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
-          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

           # Deploy command
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: ee
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: 'Build failed :bomb:'
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# IMAGE_TAG: ${{ github.sha }}
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# iimit-access-to-actor: true
|
||||
#
|
||||
|
|
|
|||
286 .github/workflows/workers.yaml vendored

@@ -6,14 +6,14 @@ on:
    build_service:
      description: 'Name of a single service to build (in small letters). "all" to build everything'
      required: false
      default: "false"
    skip_security_checks:
      description: "Skip Security checks if there is an unfixable vuln or error. Value: true/false"
      required: false
      default: "false"
  push:
    branches:
      - dev
    paths:
      - backend/**
@@ -25,162 +25,152 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with the old commit
          # to see which workers got changed.
          fetch-depth: 2
          # ref: staging

      - uses: ./.github/composite-actions/update-keys
        with:
          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
          assist_key: ${{ secrets.ASSIST_KEY }}
          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
          license_key: ${{ secrets.OSS_LICENSE_KEY }}
          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
        name: Update Keys

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      # - uses: satackey/action-docker-layer-caching@v0.0.11
      #   # Ignore the failure of a step and avoid terminating the job.
      #   continue-on-error: true
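With the kubeconfig context set, every later kubectl and helm call in the job talks to that cluster. A quick sanity check you could run at this point (not part of the workflow) is:

    kubectl config current-context   # should print the cluster from OSS_KUBECONFIG
    kubectl get nodes                # confirms the credentials actually work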
      - name: Build, tag
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          #
          # TODO: Check the container tags are the same, then skip the build and deployment.
          #
          # Build a docker container and push it to the Docker Registry so that it can be deployed to the Kubernetes cluster.
          #
          # Getting the images to build
          #
          set -xe
          touch /tmp/images_to_build.txt
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          tmp_param=${{ github.event.inputs.build_service }}
          build_param=${tmp_param:-'false'}
          case ${build_param} in
            false)
              {
                git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
                  grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
                done
              } | awk '!seen[$0]++' > /tmp/images_to_build.txt
              ;;
            all)
              ls backend/cmd > /tmp/images_to_build.txt
              ;;
            *)
              echo ${{ github.event.inputs.build_service }} > /tmp/images_to_build.txt
              ;;
          esac
          if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
            echo "Nothing to build here"
            touch /tmp/nothing-to-build-here
            exit 0
          fi
          #
          # Pushing image to registry
          #
          cd backend
          cat /tmp/images_to_build.txt
          for image in $(cat /tmp/images_to_build.txt);
          do
            echo "Building $image"
            PUSH_IMAGE=0 bash -x ./build.sh skip $image
            [[ "x$skip_security_checks" == "xtrue" ]] || {
              curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
              err_code=$?
              [[ $err_code -ne 0 ]] && {
                exit $err_code
              }
            } && {
              echo "Skipping Security Checks"
            }
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
            echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
          done
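The `false` branch of the case statement derives the rebuild list from the last commit: it diffs HEAD against HEAD~1 (hence `fetch-depth: 2` at checkout), maps each touched backend/pkg or backend/internal package to the services that import it, and de-duplicates with awk. An equivalent standalone sketch, assuming the same repository layout:

    #!/usr/bin/env bash
    # Hedged local equivalent of the change-detection branch above.
    set -e
    {
      git diff --name-only HEAD HEAD~1 \
        | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ \
        | cut -d '/' -f3 | uniq \
        | while read -r pkg_name; do
            # List every service/cmd that references the changed package.
            grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
          done
    } | awk '!seen[$0]++'   # keep only the first occurrence of each name

Also worth noting: GitHub has since deprecated the `::set-output` workflow command; were this step updated, the same output would be written through the $GITHUB_OUTPUT file instead:

    echo "image=$DOCKER_REPO/$image:$IMAGE_TAG" >> "$GITHUB_OUTPUT"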
      - name: Deploying to kubernetes
        env:
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
        run: |
          #
          # Deploying image to environment.
          #
          set -x
          [[ -f /tmp/nothing-to-build-here ]] && exit 0
          cd scripts/helmcharts/
          echo > /tmp/image_override.yaml
          mkdir /tmp/helmcharts
          mv openreplay/charts/ingress-nginx /tmp/helmcharts/
          mv openreplay/charts/quickwit /tmp/helmcharts/
          mv openreplay/charts/connector /tmp/helmcharts/
          ## Update images
          for image in $(cat /tmp/images_to_build.txt);
          do
            mv openreplay/charts/$image /tmp/helmcharts/
            cat <<EOF>>/tmp/image_override.yaml
          ${image}:
            image:
              # We have to strip off the -ee, as helm will append it.
              tag: ${IMAGE_TAG}
          EOF
          done
          ls /tmp/helmcharts
          rm -rf openreplay/charts/*
          ls openreplay/charts
          mv /tmp/helmcharts/* openreplay/charts/
          ls openreplay/charts
          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

          # Deploy command
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
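Because the rendered manifests are piped straight into kubectl apply, one low-risk way to preview a change like this before it reaches the cluster is a client-side dry run of the very same pipeline (a sketch, not part of the workflow):

    helm template openreplay -n app openreplay \
      -f vars.yaml -f /tmp/image_override.yaml \
      --set ingress-nginx.enabled=false --set skipMigration=true \
      | kubectl apply --dry-run=client -f -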
      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: foss
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: "Build failed :bomb:"

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #   with:
      #     limit-access-to-actor: true
3 .gitmodules vendored Normal file

@@ -0,0 +1,3 @@

[submodule "ee/intelligent_search/llama"]
	path = ee/intelligent_search/llama
	url = https://github.com/facebookresearch/llama.git
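With this entry in .gitmodules, a fresh clone does not fetch the llama submodule by itself; the standard git commands for pulling it in are:

    # After a normal clone:
    git submodule update --init ee/intelligent_search/llama
    # Or fetch all submodules at clone time:
    git clone --recurse-submodules <repository-url>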
702 LICENSE

@@ -1,694 +1,64 @@
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay
Copyright (c) 2022 Asayer, Inc.

OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:

- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
- All third party components incorporated into the OpenReplay Software are licensed under the original license provided by the owner of the applicable component.
- Some directories have a specific LICENSE file and are licensed under the "MIT" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "GNU Affero General Public License Version 3 (AGPL v3)" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "Elastic License 2.0 (ELv2)" license, as defined below.

Reach out (license@openreplay.com) if you have any questions regarding licenses.

--------------------------------------------------------------------------------
MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

The precise terms and conditions for copying, distribution and
modification follow.

TERMS AND CONDITIONS
0. Definitions.

"This License" refers to version 3 of the GNU Affero General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.

b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".

c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.

d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or

e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

------------------------------------------------------------------------------------
Elastic License 2.0 (ELv2)

**Acceptance**

By using the software, you agree to all of the terms and conditions below.

**Copyright License**

The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below

**Limitations**

You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.

You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.

You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.

**Patents**

The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.

**Notices**

You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.

If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.

**No Other Rights**

These terms do not imply any licenses other than those expressly granted in these terms.

**Termination**

If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.

**No Liability**

As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.

**Definitions**

The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it.

*you* refers to the individual or entity agreeing to these terms.

*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.

*your licenses* are all the licenses granted to you for the software under these terms.

*use* means anything you do with the software requiring one of your licenses.

*trademark* means trademarks, service marks, and similar rights.
||||
|
|
|
|||
|
|
@@ -44,7 +44,7 @@
</a>
</p>

OpenReplay is an open-source session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.

- **Session replay.** OpenReplay replays what users do, and more: it also shows you what went on under the hood and how your website or app behaves, by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, CPU/memory usage and much more.
- **Low footprint**. With a ~26KB (.br) tracker that asynchronously sends minimal data for a very limited impact on performance.
@@ -55,9 +55,9 @@ OpenReplay is an open-source session replay suite you can host yourself, that le
## Features

- **Session replay:** Lets you relive your users' experience, see where they struggle and how it affects their behavior. Each session replay is automatically analyzed based on heuristics, for easy triage.
- **Spot:** A Chrome extension that lets you record bugs directly from your browser — each recording includes all the technical details developers need to fix them.
- **DevTools:** It's like debugging in your own browser. OpenReplay provides you with the full context (network activity, JS errors, store actions/state and 40+ metrics) so you can instantly reproduce bugs and understand performance issues.
- **Assist:** Helps you support your users by seeing their live screen and instantly hopping on call (WebRTC) with them without requiring any 3rd-party screen sharing software.
- **Feature flags:** Enable or disable a feature, make gradual releases and A/B test all without redeploying your app.
- **Omni-search:** Search and filter by almost any user action/criteria, session attribute or technical event, so you can answer any question. No instrumentation required.
- **Analytics:** For surfacing the most impactful issues causing conversion and revenue loss.
- **Fine-grained privacy controls:** Choose what to capture, what to obscure or what to ignore so user data doesn't even reach your servers.
@@ -98,6 +98,10 @@ See our [Contributing Guide](CONTRIBUTING.md) for more details.

Also, feel free to join our [Slack](https://slack.openreplay.com) to ask questions, discuss ideas or connect with our contributors.

## Roadmap

Check out our [roadmap](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) and keep an eye on what's coming next. You're free to [submit](https://github.com/openreplay/openreplay/issues/new) new ideas and vote on features.

## License

This monorepo uses several licenses. See [LICENSE](/LICENSE) for more details.
3
api/.gitignore
vendored
@@ -175,5 +175,4 @@ SUBNETS.json
./chalicelib/.configs
README/*
.local
/.dev/
.local
@@ -1,31 +1,30 @@
FROM python:3.12-alpine AS builder
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"

RUN apk add --no-cache build-base
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv && \
    export UV_SYSTEM_PYTHON=true && \
    uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
    uv pip install --no-cache-dir --upgrade -r requirements.txt

FROM python:3.12-alpine
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA

RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
# Startup daemon
ENV SOURCE_MAP_VERSION=0.7.4 \
    APP_NAME=chalice \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=false \
    ENTERPRISE_BUILD=${envarg} \
    GIT_SHA=$GIT_SHA
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
    APP_NAME=chalice \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=false \
    ENTERPRISE_BUILD=${envarg} \
    GIT_SHA=$GIT_SHA

WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY . .
RUN apk add --no-cache tini && mv env.default .env
RUN mv env.default .env

RUN adduser -u 1001 openreplay -D
USER 1001

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["./entrypoint.sh"]
CMD ./entrypoint.sh
@@ -1,4 +1,4 @@
FROM python:3.12-alpine
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
@@ -16,9 +16,7 @@ ENV APP_NAME=alerts \

WORKDIR /work
COPY requirements-alerts.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY . .
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
32
api/Pipfile
@@ -4,26 +4,22 @@ verify_ssl = true
name = "pypi"

[packages]
urllib3 = "==2.3.0"
requests = "==2.32.3"
boto3 = "==1.36.12"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.15"
elasticsearch = "==8.17.1"
jira = "==3.8.0"
cachetools = "==5.5.1"
fastapi = "==0.115.8"
uvicorn = {extras = ["standard"], version = "==0.34.0"}
requests = "==2.31.0"
boto3 = "==1.33.8"
pyjwt = "==2.8.0"
psycopg2-binary = "==2.9.9"
elasticsearch = "==8.11.0"
jira = "==3.5.2"
fastapi = "==0.104.1"
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.6"}
apscheduler = "==3.11.0"
redis = "==5.2.1"
apscheduler = "==3.10.4"
redis = "==5.0.1"
urllib3 = "==1.26.16"
uvicorn = {extras = ["standard"], version = "==0.23.2"}
pydantic = {extras = ["email"], version = "==2.3.0"}
psycopg = {extras = ["binary", "pool"], version = "==3.1.14"}

[dev-packages]

[requires]
python_version = "3.12"
python_full_version = "3.12.8"
python_version = "3.11"
25
api/app.py
@@ -9,18 +9,18 @@ from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from psycopg import AsyncConnection
from psycopg.rows import dict_row
from starlette.responses import StreamingResponse

from chalicelib.utils import helper
from chalicelib.utils import pg_client, ch_client
from chalicelib.utils import pg_client
from crons import core_crons, core_dynamic_crons
from routers import core, core_dynamic
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
from routers.subs import insights, metrics, v1_api, health, usability_tests

loglevel = config("LOGLEVEL", default=logging.WARNING)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
from psycopg.rows import dict_row


class ORPYAsyncConnection(AsyncConnection):
@@ -38,7 +38,6 @@ async def lifespan(app: FastAPI):

    app.schedule = AsyncIOScheduler()
    await pg_client.init()
    await ch_client.init()
    app.schedule.start()

    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
@@ -57,9 +56,7 @@
        "application_name": "AIO" + config("APP_NAME", default="PY"),
    }

    database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection,
                                                min_size=config("PG_AIO_MINCONN", cast=int, default=1),
                                                max_size=config("PG_AIO_MAXCONN", cast=int, default=5), )
    database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection)
    app.state.postgresql = database

    # App listening
@@ -93,7 +90,6 @@ async def or_middleware(request: Request, call_next):
    if now > 2:
        now = round(now, 2)
        logging.warning(f"Execution time: {now} s for {request.method}: {request.url.path}")
    response.headers["x-robots-tag"] = 'noindex, nofollow'
    return response


@@ -125,10 +121,9 @@ app.include_router(usability_tests.public_app)
app.include_router(usability_tests.app)
app.include_router(usability_tests.app_apikey)

app.include_router(spot.public_app)
app.include_router(spot.app)
app.include_router(spot.app_apikey)

app.include_router(product_anaytics.public_app)
app.include_router(product_anaytics.app)
app.include_router(product_anaytics.app_apikey)
# @app.get('/private/shutdown', tags=["private"])
# async def stop_server():
#     logging.info("Requested shutdown")
#     await shutdown()
#     import os, signal
#     os.kill(1, signal.SIGTERM)
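The pool-sizing arguments removed in the hunk above are plain `psycopg_pool` constructor parameters. As a point of reference, here is a minimal, self-contained sketch of the same pattern; the `DB_DSN` variable is illustrative and not part of this repo:

```python
# Minimal sketch of an async psycopg connection pool sized from env vars,
# mirroring the PG_AIO_MINCONN / PG_AIO_MAXCONN pattern above.
# DB_DSN is a hypothetical setting used only for this example.
import asyncio

import psycopg_pool
from decouple import config


async def main():
    pool = psycopg_pool.AsyncConnectionPool(
        conninfo=config("DB_DSN", default="postgresql://localhost/postgres"),
        min_size=config("PG_AIO_MINCONN", cast=int, default=1),
        max_size=config("PG_AIO_MAXCONN", cast=int, default=5),
        open=False,  # open explicitly below rather than in the constructor
    )
    await pool.open()
    async with pool.connection() as conn:
        cur = await conn.execute("SELECT 1")
        print(await cur.fetchone())
    await pool.close()


asyncio.run(main())
```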
@@ -5,14 +5,14 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI

from chalicelib.core.alerts import alerts_processor
from chalicelib.core import alerts_processor
from chalicelib.utils import pg_client


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup
    ap_logger.info(">>>>> starting up <<<<<")
    logging.info(">>>>> starting up <<<<<")
    await pg_client.init()
    app.schedule.start()
    app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
@@ -27,22 +27,14 @@ async def lifespan(app: FastAPI):
    yield

    # Shutdown
    ap_logger.info(">>>>> shutting down <<<<<")
    logging.info(">>>>> shutting down <<<<<")
    app.schedule.shutdown(wait=False)
    await pg_client.terminate()


loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)

app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
              redoc_url=config("redoc_url", default=""), lifespan=lifespan)

app.schedule = AsyncIOScheduler()
ap_logger.info("============= ALERTS =============")
logging.info("============= ALERTS =============")


@app.get("/")
@@ -58,8 +50,17 @@ async def get_health_status():
    }}


app.schedule = AsyncIOScheduler()

loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()

if config("LOCAL_DEV", default=False, cast=bool):
    @app.get('/trigger', tags=["private"])
    async def trigger_main_cron():
        ap_logger.info("Triggering main cron")
        logging.info("Triggering main cron")
        alerts_processor.process()
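For readers unfamiliar with the scheduler wiring above: the alerts service registers `alerts_processor.process` as an APScheduler interval job inside the FastAPI lifespan. A minimal standalone sketch of that pattern follows; the 60-second interval and the sleep are assumptions for illustration, not the repo's actual trigger arguments:

```python
# Sketch of the AsyncIOScheduler interval-job pattern used above:
# a processing function is registered once, then fired on a fixed interval.
import asyncio
import logging

from apscheduler.schedulers.asyncio import AsyncIOScheduler

logging.basicConfig(level=logging.INFO)


def process():
    logging.info("checking alerts...")


async def main():
    schedule = AsyncIOScheduler()
    schedule.add_job(id="alerts_processor", func=process, trigger="interval", seconds=60)
    schedule.start()          # must be called with a running asyncio loop
    await asyncio.sleep(180)  # let the job fire a few times
    schedule.shutdown(wait=False)


asyncio.run(main())
```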
@@ -2,14 +2,13 @@ import datetime
import logging
from typing import Optional

from decouple import config
from fastapi import Request
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from starlette import status
from starlette.exceptions import HTTPException

import schemas
from chalicelib.core import authorizers, users, spot
from chalicelib.core import authorizers, users

logger = logging.getLogger(__name__)

@@ -33,10 +32,28 @@ class JWTAuth(HTTPBearer):

    async def __call__(self, request: Request) -> Optional[schemas.CurrentContext]:
        if request.url.path in ["/refresh", "/api/refresh"]:
            return await self.__process_refresh_call(request)
            refresh_token = request.cookies.get("refreshToken")
            jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=refresh_token)
            if jwt_payload is None or jwt_payload.get("jti") is None:
                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")
            auth_exists = users.refresh_auth_exists(user_id=jwt_payload.get("userId", -1),
                                                    jwt_jti=jwt_payload["jti"])
            if not auth_exists:
                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")

        elif request.url.path in ["/spot/refresh", "/api/spot/refresh"]:
            return await self.__process_spot_refresh_call(request)
            credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
            if credentials:
                if not credentials.scheme == "Bearer":
                    raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                        detail="Invalid authentication scheme.")
                old_jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials,
                                                             leeway=datetime.timedelta(days=3))
                if old_jwt_payload is None \
                        or old_jwt_payload.get("userId") is None \
                        or old_jwt_payload.get("userId") != jwt_payload.get("userId"):
                    raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")

                return _get_current_auth_context(request=request, jwt_payload=jwt_payload)

        else:
            credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
@@ -45,8 +62,9 @@ class JWTAuth(HTTPBearer):
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                    detail="Invalid authentication scheme.")
            jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials)
            auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1),
                                                                        jwt_iat=jwt_payload.get("iat", 100))
            auth_exists = jwt_payload is not None \
                          and users.auth_exists(user_id=jwt_payload.get("userId", -1),
                                                jwt_iat=jwt_payload.get("iat", 100))
            if jwt_payload is None \
                    or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
                    or not auth_exists:
@@ -61,93 +79,7 @@ class JWTAuth(HTTPBearer):

            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")

        if jwt_payload.get("aud", "").startswith("spot") and not request.url.path.startswith("/spot"):
            # Allow access to spot endpoints only
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                                detail="Unauthorized access (spot).")
        elif jwt_payload.get("aud", "").startswith("front") and request.url.path.startswith("/spot"):
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                                detail="Unauthorized access endpoint reserved for Spot only.")

        return _get_current_auth_context(request=request, jwt_payload=jwt_payload)

        logger.warning("Invalid authorization code.")
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code.")

    async def __process_refresh_call(self, request: Request) -> schemas.CurrentContext:
        if "refreshToken" not in request.cookies:
            logger.warning("Missing refreshToken cookie.")
            jwt_payload = None
        else:
            jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["refreshToken"])

        if jwt_payload is None or jwt_payload.get("jti") is None:
            logger.warning("Null refreshToken's payload, or null JTI.")
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Invalid refresh-token or expired refresh-token.")
        auth_exists = users.refresh_auth_exists(user_id=jwt_payload.get("userId", -1),
                                                jwt_jti=jwt_payload["jti"])
        if not auth_exists:
            logger.warning("refreshToken's user not found.")
            logger.warning(jwt_payload)
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Invalid refresh-token or expired refresh-token.")

        credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
        if credentials:
            if not credentials.scheme == "Bearer":
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                    detail="Invalid authentication scheme.")
            old_jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials,
                                                         leeway=datetime.timedelta(
                                                             days=config("JWT_LEEWAY_DAYS", cast=int, default=3)
                                                         ))
            if old_jwt_payload is None \
                    or old_jwt_payload.get("userId") is None \
                    or old_jwt_payload.get("userId") != jwt_payload.get("userId"):
                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")

            return _get_current_auth_context(request=request, jwt_payload=jwt_payload)

        logger.warning("Invalid authorization code (refresh logic).")
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code for refresh.")

    async def __process_spot_refresh_call(self, request: Request) -> schemas.CurrentContext:
        if "spotRefreshToken" not in request.cookies:
            logger.warning("Missing spotRefreshToken cookie.")
            jwt_payload = None
        else:
            jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["spotRefreshToken"])

        if jwt_payload is None or jwt_payload.get("jti") is None:
            logger.warning("Null spotRefreshToken's payload, or null JTI.")
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Invalid spotRefreshToken or expired refresh-token.")
        auth_exists = spot.refresh_auth_exists(user_id=jwt_payload.get("userId", -1),
                                               jwt_jti=jwt_payload["jti"])
        if not auth_exists:
            logger.warning("spotRefreshToken's user not found.")
            logger.warning(jwt_payload)
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Invalid spotRefreshToken or expired refresh-token.")

        credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
        if credentials:
            if not credentials.scheme == "Bearer":
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                    detail="Invalid spot-authentication scheme.")
            old_jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials,
                                                         leeway=datetime.timedelta(
                                                             days=config("JWT_LEEWAY_DAYS", cast=int, default=3)
                                                         ))
            if old_jwt_payload is None \
                    or old_jwt_payload.get("userId") is None \
                    or old_jwt_payload.get("userId") != jwt_payload.get("userId"):
                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                    detail="Invalid spot-token or expired token.")

            return _get_current_auth_context(request=request, jwt_payload=jwt_payload)

        logger.warning("Invalid authorization code (spot-refresh logic).")
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                            detail="Invalid authorization code for spot-refresh.")
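The refresh flows above all follow the same shape: decode the refresh token from its cookie, decode the possibly expired access token from the Authorization header within a leeway window, then require both tokens to belong to the same user. A minimal sketch of that check with PyJWT; the secrets, payload fields and HS256 choice are illustrative assumptions, not the repo's actual `authorizers` implementation:

```python
# Sketch of the paired refresh/access-token validation above:
# the refresh token is decoded strictly, the old access token is accepted
# with leeway (so a recently expired one still passes), and both must
# refer to the same user. Secrets here are stand-ins for illustration.
import datetime

import jwt

ACCESS_SECRET = "access-secret"    # hypothetical
REFRESH_SECRET = "refresh-secret"  # hypothetical


def check_refresh(access_token: str, refresh_token: str) -> dict:
    refresh_payload = jwt.decode(refresh_token, REFRESH_SECRET, algorithms=["HS256"])
    if refresh_payload.get("jti") is None:
        raise PermissionError("Invalid refresh-token.")
    old_payload = jwt.decode(access_token, ACCESS_SECRET, algorithms=["HS256"],
                             leeway=datetime.timedelta(days=3))
    if old_payload.get("userId") != refresh_payload.get("userId"):
        raise PermissionError("Token/user mismatch.")
    return refresh_payload
```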
@@ -31,8 +31,8 @@ class ProjectAuthorizer:
            logger.debug(f"unauthorized project {self.project_identifier}:{value}")
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
        else:
            current_project = schemas.ProjectContext(projectId=current_project["projectId"],
                                                     projectKey=current_project["projectKey"],
                                                     platform=current_project["platform"],
                                                     name=current_project["name"])
            current_project = schemas.CurrentProjectContext(projectId=current_project["projectId"],
                                                            projectKey=current_project["projectKey"],
                                                            platform=current_project["platform"],
                                                            name=current_project["name"])
            request.state.currentContext.project = current_project
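The hunk above swaps `schemas.ProjectContext` for `schemas.CurrentProjectContext` while keeping the same camelCase fields. The schema definitions themselves are not part of this diff; the sketch below is a hypothetical pydantic model with those fields, for orientation only:

```python
# Hypothetical reconstruction of a project-context schema with the fields
# used at the call site above; the real definitions live in schemas.py
# and may differ.
from pydantic import BaseModel


class ProjectContext(BaseModel):
    projectId: int
    projectKey: str
    platform: str
    name: str


ctx = ProjectContext(projectId=1, projectKey="abc123", platform="web", name="demo")
print(ctx.model_dump())
```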
55
api/build.sh
@@ -9,15 +9,12 @@

# Helper function
exit_err() {
    err_code=$1
    if [[ $err_code != 0 ]]; then
        exit $err_code
    fi
    err_code=$1
    if [[ $err_code != 0 ]]; then
        exit $err_code
    fi
}

source ../scripts/lib/_docker.sh
ARCH=${ARCH:-'amd64'}

environment=$1
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
@@ -33,31 +30,31 @@ check_prereq() {

[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
    image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    [[ $ee == "true" ]] && {
        image_tag="${image_tag}-ee"
    }
    image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    [[ $ee == "true" ]] && {
        image_tag="${image_tag}-ee"
    }
}
update_helm_release() {
    [[ $ee == "true" ]] && return
    HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    # Update the chart version
    sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Update image tags
    sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Commit the changes
    git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    git commit -m "chore(helm): Updating $chart image release"
    [[ $ee == "true" ]] && return
    HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    # Update the chart version
    sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Update image tags
    sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Commit the changes
    git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    git commit -m "chore(helm): Updating $chart image release"
}

function build_api() {
function build_api(){
    destination="_api"
    [[ $1 == "ee" ]] && {
        destination="_api_ee"
    }
    [[ -d ../${destination} ]] && {
        echo "Removing previous build cache"
        rm -rf ../${destination}
        echo "Removing previous build cache"
        rm -rf ../${destination}
    }
    cp -R ../api ../${destination}
    cd ../${destination} || exit_err 100
@@ -69,16 +66,16 @@ function build_api() {
        tag="ee-"
    }
    mv Dockerfile.dockerignore .dockerignore
    docker build -f ./Dockerfile --platform linux/${ARCH} --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} .
    docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/chalice:${image_tag} .
    cd ../api || exit_err 100
    rm -rf ../${destination}
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag}
        docker tag ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/chalice:${image_tag}
        docker tag ${DOCKER_REPO:-'local'}/chalice:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
        docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
    }
    [[ $SIGN_IMAGE -eq 1 ]] && {
        cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag}
        cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/chalice:${image_tag}
    }
    echo "api docker build completed"
}
@@ -87,5 +84,5 @@ check_prereq
build_api $environment
echo buil_complete
if [[ $PATCH -eq 1 ]]; then
    update_helm_release
    update_helm_release
fi
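This script and the build scripts that follow bump the chart's last version component with the one-liner `awk -F. -v OFS=. '{$NF += 1 ; print}'`. The same patch-bump logic, sketched in Python for clarity:

```python
# Equivalent of the awk patch-bump used in the build scripts:
# split on '.', increment the final component, re-join.
def bump_patch(version: str) -> str:
    parts = version.split(".")
    parts[-1] = str(int(parts[-1]) + 1)
    return ".".join(parts)


assert bump_patch("1.22.0") == "1.22.1"
assert bump_patch("0.1.9") == "0.1.10"
```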
@@ -10,7 +10,6 @@
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
source ../scripts/lib/_docker.sh
check_prereq() {
    which docker || {
        echo "Docker not installed, please install docker."
@@ -18,26 +17,27 @@ check_prereq() {
    }
}


[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
    image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    [[ $ee == "true" ]] && {
        image_tag="${image_tag}-ee"
    }
    image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    [[ $ee == "true" ]] && {
        image_tag="${image_tag}-ee"
    }
}
update_helm_release() {
    chart=$1
    HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    # Update the chart version
    sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Update image tags
    sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Commit the changes
    git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    git commit -m "chore(helm): Updating $chart image release"
    chart=$1
    HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
    # Update the chart version
    sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Update image tags
    sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    # Commit the changes
    git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
    git commit -m "chore(helm): Updating $chart image release"
}

function build_alerts() {
function build_alerts(){
    destination="_alerts"
    [[ $1 == "ee" ]] && {
        destination="_alerts_ee"
@@ -52,7 +52,7 @@ function build_alerts() {
        tag="ee-"
    }
    mv Dockerfile_alerts.dockerignore .dockerignore
    docker build -f ./Dockerfile_alerts --platform linux/${ARCH:-"amd64"} --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
    docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
    cd ../api
    rm -rf ../${destination}
    [[ $PUSH_IMAGE -eq 1 ]] && {
@@ -69,5 +69,5 @@ function build_alerts() {
check_prereq
build_alerts $1
if [[ $PATCH -eq 1 ]]; then
    update_helm_release alerts
    update_helm_release alerts
fi
@@ -9,7 +9,6 @@

git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
envarg="default-foss"
source ../scripts/lib/_docker.sh
check_prereq() {
    which docker || {
        echo "Docker not installed, please install docker."
@@ -18,7 +17,7 @@ check_prereq() {
    [[ exit -eq 1 ]] && exit 1
}

function build_crons() {
function build_crons(){
    destination="_crons_ee"
    cp -R ../api ../${destination}
    cd ../${destination}
@@ -29,7 +28,7 @@ function build_crons() {
    envarg="default-ee"
    tag="ee-"
    mv Dockerfile_crons.dockerignore .dockerignore
    docker build -f ./Dockerfile_crons --platform=linux/${ARCH:-'amd64'} --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
    docker build -f ./Dockerfile_crons --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
    cd ../api
    rm -rf ../${destination}
    [[ $PUSH_IMAGE -eq 1 ]] && {
@@ -47,6 +46,7 @@ check_prereq
[[ $1 == "ee" ]] && {
    build_crons $1
} || {
    echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
    exit 100
    echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
    exit 100
}
@@ -0,0 +1 @@
from . import sessions as sessions_legacy
@@ -7,13 +7,11 @@ from decouple import config

import schemas
from chalicelib.core import notifications, webhook
from chalicelib.core.collaborations.collaboration_msteams import MSTeams
from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.core.collaboration_msteams import MSTeams
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import pg_client, helper, email_helper, smtp
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)


def get(id):
    with pg_client.PostgresClient() as cur:
@@ -126,26 +124,26 @@ def process_notifications(data):
        try:
            send_to_slack_batch(notifications_list=notifications_list)
        except Exception as e:
            logger.error("!!!Error while sending slack notifications batch")
            logger.error(str(e))
            logging.error("!!!Error while sending slack notifications batch")
            logging.error(str(e))
    elif t == "msteams":
        try:
            send_to_msteams_batch(notifications_list=notifications_list)
        except Exception as e:
            logger.error("!!!Error while sending msteams notifications batch")
            logger.error(str(e))
            logging.error("!!!Error while sending msteams notifications batch")
            logging.error(str(e))
    elif t == "email":
        try:
            send_by_email_batch(notifications_list=notifications_list)
        except Exception as e:
            logger.error("!!!Error while sending email notifications batch")
            logger.error(str(e))
            logging.error("!!!Error while sending email notifications batch")
            logging.error(str(e))
    elif t == "webhook":
        try:
            webhook.trigger_batch(data_list=notifications_list)
        except Exception as e:
            logger.error("!!!Error while sending webhook notifications batch")
            logger.error(str(e))
            logging.error("!!!Error while sending webhook notifications batch")
            logging.error(str(e))


def send_by_email(notification, destination):
@@ -160,9 +158,9 @@ def send_by_email(notification, destination):

def send_by_email_batch(notifications_list):
    if not smtp.has_smtp():
        logger.info("no SMTP configuration for email notifications")
        logging.info("no SMTP configuration for email notifications")
    if notifications_list is None or len(notifications_list) == 0:
        logger.info("no email notifications")
        logging.info("no email notifications")
        return
    for n in notifications_list:
        send_by_email(notification=n.get("notification"), destination=n.get("destination"))
@@ -231,5 +229,5 @@ def get_predefined_values():
               "unit": "count" if v.endswith(".count") else "ms",
               "predefined": True,
               "metricId": None,
               "seriesId": None} for v in values if v != schemas.AlertColumn.CUSTOM]
               "seriesId": None} for v in values if v != schemas.AlertColumn.custom]
    return values
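Note the pattern in `process_notifications` above: each destination type is sent inside its own try/except, so a failure in one channel cannot block the others. The same idea, sketched with a dispatch table instead of the if/elif chain; the handler bodies are stand-ins, not the repo's real senders:

```python
# Sketch of per-channel dispatch with error isolation, as used above;
# the handler functions here are placeholders for illustration.
import logging

logger = logging.getLogger(__name__)


def send_to_slack_batch(notifications_list):
    ...


def send_to_msteams_batch(notifications_list):
    ...


HANDLERS = {
    "slack": send_to_slack_batch,
    "msteams": send_to_msteams_batch,
}


def process_notifications(by_type: dict):
    for channel, notifications_list in by_type.items():
        handler = HANDLERS.get(channel)
        if handler is None:
            logger.warning("no handler for channel %s", channel)
            continue
        try:
            handler(notifications_list)
        except Exception as e:
            # one failing channel must not block the others
            logger.error("error while sending %s notifications batch: %s", channel, e)
```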
@@ -1,10 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_ALERTS", cast=bool, default=False):
    logging.info(">>> Using experimental alerts")
    from . import alerts_processor_ch as alerts_processor
else:
    from . import alerts_processor as alerts_processor
@@ -1,33 +0,0 @@
from chalicelib.core.alerts.modules import TENANT_ID
from chalicelib.utils import pg_client, helper


def get_all_alerts():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = f"""SELECT {TENANT_ID} AS tenant_id,
                           alert_id,
                           projects.project_id,
                           projects.name AS project_name,
                           detection_method,
                           query,
                           options,
                           (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
                           alerts.name,
                           alerts.series_id,
                           filter,
                           change,
                           COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
                                    query ->> 'left') AS series_name
                    FROM public.alerts
                         INNER JOIN projects USING (project_id)
                         LEFT JOIN metric_series USING (series_id)
                         LEFT JOIN metrics USING (metric_id)
                    WHERE alerts.deleted_at ISNULL
                      AND alerts.active
                      AND projects.active
                      AND projects.deleted_at ISNULL
                      AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
                    ORDER BY alerts.created_at;"""
        cur.execute(query=query)
        all_alerts = helper.list_to_camel_case(cur.fetchall())
    return all_alerts
@@ -1,169 +0,0 @@
import logging

from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_pg as sessions
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)

LeftToDb = {
    schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
    schemas.AlertColumn.PERFORMANCE__FIRST_MEANINGFUL_PAINT__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
    schemas.AlertColumn.PERFORMANCE__PAGE_LOAD_TIME__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"},
    schemas.AlertColumn.PERFORMANCE__DOM_BUILD_TIME__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(dom_building_time,0))"},
    schemas.AlertColumn.PERFORMANCE__SPEED_INDEX__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"},
    schemas.AlertColumn.PERFORMANCE__PAGE_RESPONSE_TIME__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(response_time,0))"},
    schemas.AlertColumn.PERFORMANCE__TTFB__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(first_paint_time,0))"},
    schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(visually_complete,0))"},
    schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
        "table": "public.sessions",
        "formula": "COUNT(DISTINCT session_id)",
        "condition": "errors_count > 0 AND duration>0"},
    schemas.AlertColumn.ERRORS__JAVASCRIPT__COUNT: {
        "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
    schemas.AlertColumn.ERRORS__BACKEND__COUNT: {
        "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
}


def Build(a):
    now = TimeUTC.now()
    params = {"project_id": a["projectId"], "now": now}
    full_args = {}
    j_s = True
    main_table = ""
    if a["seriesId"] is not None:
        a["filter"]["sort"] = "session_id"
        a["filter"]["order"] = schemas.SortOrderType.DESC
        a["filter"]["startDate"] = 0
        a["filter"]["endDate"] = TimeUTC.now()
        try:
            data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
        except ValidationError:
            logger.warning("Validation error for:")
            logger.warning(a["filter"])
            raise

        full_args, query_part = sessions.search_query_parts(data=data, error_status=None, errors_only=False,
                                                            issue=None, project_id=a["projectId"], user_id=None,
                                                            favorite_only=False)
        subQ = f"""SELECT COUNT(session_id) AS value
                   {query_part}"""
    else:
        colDef = LeftToDb[a["query"]["left"]]
        subQ = f"""SELECT {colDef["formula"]} AS value
                   FROM {colDef["table"]}
                   WHERE project_id = %(project_id)s
                         {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
        j_s = colDef.get("joinSessions", True)
        main_table = colDef["table"]
    is_ss = main_table == "public.sessions"
    q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""

    if a["detectionMethod"] == schemas.AlertDetectionMethod.THRESHOLD:
        if a["seriesId"] is not None:
            q += f""" FROM ({subQ}) AS stat"""
        else:
            q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                            {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
        params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
    else:
        if a["change"] == schemas.AlertDetectionType.CHANGE:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})-({sub2})) AS value"
                q += f" FROM ( {sub1} ) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
            else:
                sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                           {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
                           {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
                sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
                q += f" FROM ( {sub1} ) AS stat"

        else:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() \
                                            - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
                                            * 60 * 1000}
            else:
                sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                           {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
                           {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                params["timestamp_sub2"] = TimeUTC.now() \
                                           - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
                sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"

    return q, params


def process():
    logger.info("> processing alerts on PG")
    notifications = []
    all_alerts = alerts_listener.get_all_alerts()
    with pg_client.PostgresClient() as cur:
        for alert in all_alerts:
            if alert_helpers.can_check(alert):
                query, params = Build(alert)
                try:
                    query = cur.mogrify(query, params)
                except Exception as e:
                    logger.error(
                        f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
                    logger.error(e)
                    continue
                logger.debug(alert)
                logger.debug(query)
                try:
                    cur.execute(query)
                    result = cur.fetchone()
                    if result["valid"]:
                        logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
                        notifications.append(alert_helpers.generate_notification(alert, result))
                except Exception as e:
                    logger.error(
                        f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
                    logger.error(query)
                    logger.error(e)
                    cur = cur.recreate(rollback=True)
        if len(notifications) > 0:
            cur.execute(
                cur.mogrify(f"""UPDATE public.alerts
                                SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                                WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
    if len(notifications) > 0:
        alerts.process_notifications(notifications)
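The percent branch of `Build` above computes `((current / NULLIF(previous, 0)) - 1) * 100`. The same formula in plain Python, with the NULLIF guard made explicit:

```python
# Percent-change formula from the Build() queries above; returns None when
# the previous period is zero, mirroring SQL's NULLIF(previous, 0).
from typing import Optional


def percent_change(current: float, previous: float) -> Optional[float]:
    if previous == 0:
        return None
    return (current / previous - 1) * 100


assert percent_change(150, 100) == 50.0
assert percent_change(50, 100) == -50.0
assert percent_change(10, 0) is None
```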
@@ -1,195 +0,0 @@
import logging

from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_ch as sessions

logger = logging.getLogger(__name__)

LeftToDb = {
    schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "COALESCE(AVG(NULLIF(dom_content_loaded_event_time ,0)),0)",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__FIRST_MEANINGFUL_PAINT__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__PAGE_LOAD_TIME__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(load_event_time ,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__DOM_BUILD_TIME__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(dom_building_time,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__SPEED_INDEX__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(speed_index,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__PAGE_RESPONSE_TIME__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(response_time,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__TTFB__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(first_contentful_paint_time,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
        "formula": "AVG(NULLIF(visually_complete,0))",
        "eventType": "LOCATION"
    },
    schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_sessions_table(timestamp)} AS sessions",
        "formula": "COUNT(DISTINCT session_id)",
        "condition": "duration>0 AND errors_count>0"
    },
    schemas.AlertColumn.ERRORS__JAVASCRIPT__COUNT: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
        "eventType": "ERROR",
        "formula": "COUNT(DISTINCT session_id)",
        "condition": "source='js_exception'"
    },
    schemas.AlertColumn.ERRORS__BACKEND__COUNT: {
        "table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
        "eventType": "ERROR",
        "formula": "COUNT(DISTINCT session_id)",
        "condition": "source!='js_exception'"
    },
}


def Build(a):
    now = TimeUTC.now()
    params = {"project_id": a["projectId"], "now": now}
    full_args = {}
    if a["seriesId"] is not None:
        a["filter"]["sort"] = "session_id"
        a["filter"]["order"] = schemas.SortOrderType.DESC
        a["filter"]["startDate"] = 0
        a["filter"]["endDate"] = TimeUTC.now()
        try:
            data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
        except ValidationError:
            logger.warning("Validation error for:")
            logger.warning(a["filter"])
            raise

        full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
                                                               issue=None, project_id=a["projectId"], user_id=None,
                                                               favorite_only=False)
        subQ = f"""SELECT COUNT(session_id) AS value
                   {query_part}"""
    else:
        colDef = LeftToDb[a["query"]["left"]]
        params["event_type"] = LeftToDb[a["query"]["left"]].get("eventType")
        subQ = f"""SELECT {colDef["formula"]} AS value
                   FROM {colDef["table"](now)}
                   WHERE project_id = %(project_id)s
                         {"AND event_type=%(event_type)s" if params["event_type"] else ""}
                         {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""

    q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""

    if a["detectionMethod"] == schemas.AlertDetectionMethod.THRESHOLD:
        if a["seriesId"] is not None:
            q += f""" FROM ({subQ}) AS stat"""
        else:
            q += f""" FROM ({subQ}
                            AND datetime>=toDateTime(%(startDate)s/1000)
                            AND datetime<=toDateTime(%(now)s/1000) ) AS stat"""
        params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
    else:
        if a["change"] == schemas.AlertDetectionType.CHANGE:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})-({sub2})) AS value"
                q += f" FROM ( {sub1} ) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
            else:
                sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
                                  AND datetime<=toDateTime(%(now)s/1000)"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
                                  AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
                params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
                sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
                q += f" FROM ( {sub1} ) AS stat"

        else:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() \
                                            - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
                                            * 60 * 1000}
            else:
                sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
                                  AND datetime<=toDateTime(%(now)s/1000)"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
                                  AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
                params["timestamp_sub2"] = TimeUTC.now() \
                                           - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
                sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"

    return q, params


def process():
    logger.info("> processing alerts on CH")
    notifications = []
    all_alerts = alerts_listener.get_all_alerts()
    with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
        for alert in all_alerts:
            if alert["query"]["left"] != "CUSTOM":
                continue
            if alert_helpers.can_check(alert):
                query, params = Build(alert)
                try:
                    query = ch_cur.format(query=query, parameters=params)
                except Exception as e:
                    logger.error(
                        f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
                    logger.error(e)
                    continue
                logger.debug(alert)
                logger.debug(query)
                try:
                    result = ch_cur.execute(query=query)
                    if len(result) > 0:
                        result = result[0]

                    if result["valid"]:
                        logger.info("Valid alert, notifying users")
                        notifications.append(alert_helpers.generate_notification(alert, result))
                except Exception as e:
                    logger.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
                    logger.error(str(e))
                    logger.error(query)
        if len(notifications) > 0:
            cur.execute(
                cur.mogrify(f"""UPDATE public.alerts
                                SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                                WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
    if len(notifications) > 0:
        alerts.process_notifications(notifications)
@@ -1,3 +0,0 @@
TENANT_ID = "-1"

from . import helpers as alert_helpers
@@ -1,74 +0,0 @@
import decimal
import logging

import schemas
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)
# This is the frequency of execution for each threshold
TimeInterval = {
    15: 3,
    30: 5,
    60: 10,
    120: 20,
    240: 30,
    1440: 60,
}


def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"


def can_check(a) -> bool:
    now = TimeUTC.now()

    repetitionBase = a["options"]["currentPeriod"] \
        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
        else a["options"]["previousPeriod"]

    if TimeInterval.get(repetitionBase) is None:
        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
        return False

    return (a["options"]["renotifyInterval"] <= 0 or
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


def generate_notification(alert, result):
    left = __format_value(result['value'])
    right = __format_value(alert['query']['right'])
    return {
        "alertId": alert["alertId"],
        "tenantId": alert["tenantId"],
        "title": alert["name"],
        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
        "buttonText": "Check metrics for more details",
        "buttonUrl": f"/{alert['projectId']}/metrics",
        "imageUrl": None,
        "projectId": alert["projectId"],
        "projectName": alert["projectName"],
        "options": {"source": "ALERT", "sourceId": alert["alertId"],
                    "sourceMeta": alert["detectionMethod"],
                    "message": alert["options"]["message"], "projectId": alert["projectId"],
                    "data": {"title": alert["name"],
                             "limitValue": alert["query"]["right"],
                             "actualValue": float(result["value"]) \
                                 if isinstance(result["value"], decimal.Decimal) \
                                 else result["value"],
                             "operator": alert["query"]["operator"],
                             "trigger": alert["query"]["left"],
                             "alertId": alert["alertId"],
                             "detectionMethod": alert["detectionMethod"],
                             "currentPeriod": alert["options"]["currentPeriod"],
                             "previousPeriod": alert["options"]["previousPeriod"],
                             "createdAt": TimeUTC.now()}},
    }
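To make the cadence logic in can_check concrete: TimeInterval maps an alert period (in minutes) to how often it is evaluated, and the final modulo test passes only during the first minute of each evaluation window. A small worked example with made-up timestamps:

TimeInterval = {15: 3, 30: 5, 60: 10, 120: 20, 240: 30, 1440: 60}

created_at = 0                          # alert creation time in ms (illustrative)
repetition_base = 30                    # previousPeriod of 30 min -> check every 5 min
window_ms = TimeInterval[repetition_base] * 60 * 1000

now = int(5.5 * 60 * 1000)              # 5.5 minutes after creation
assert ((now - created_at) % window_ms) < 60 * 1000       # inside the 5-6 min slot

now = 3 * 60 * 1000                     # 3 minutes after creation
assert not ((now - created_at) % window_ms) < 60 * 1000   # between slots, skipped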
32 api/chalicelib/core/alerts_listener.py Normal file
@@ -0,0 +1,32 @@
from chalicelib.utils import pg_client, helper


def get_all_alerts():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = """SELECT -1 AS tenant_id,
                          alert_id,
                          projects.project_id,
                          projects.name AS project_name,
                          detection_method,
                          query,
                          options,
                          (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
                          alerts.name,
                          alerts.series_id,
                          filter,
                          change,
                          COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
                                   query ->> 'left') AS series_name
                   FROM public.alerts
                        INNER JOIN projects USING (project_id)
                        LEFT JOIN metric_series USING (series_id)
                        LEFT JOIN metrics USING (metric_id)
                   WHERE alerts.deleted_at ISNULL
                     AND alerts.active
                     AND projects.active
                     AND projects.deleted_at ISNULL
                     AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
                   ORDER BY alerts.created_at;"""
        cur.execute(query=query)
        all_alerts = helper.list_to_camel_case(cur.fetchall())
    return all_alerts
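The processor below reads these rows with camelCase keys (alert["detectionMethod"], alert["projectId"], ...), so helper.list_to_camel_case presumably renames the snake_case columns PostgreSQL returns. A rough sketch of that transformation (the real helper may differ):

def to_camel_case(row):
    # Illustrative only: converts snake_case keys to camelCase.
    def camel(key):
        head, *rest = key.split("_")
        return head + "".join(part.capitalize() for part in rest)
    return {camel(k): v for k, v in row.items()}


row = {"project_id": 1, "detection_method": "threshold", "series_name": "errors"}
assert to_camel_case(row) == {"projectId": 1, "detectionMethod": "threshold",
                              "seriesName": "errors"}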
262 api/chalicelib/core/alerts_processor.py Normal file
@@ -0,0 +1,262 @@
import decimal
import logging

from decouple import config
from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.core import alerts
from chalicelib.core import alerts_listener
from chalicelib.core import sessions
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))

LeftToDb = {
    schemas.AlertColumn.performance__dom_content_loaded__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
    schemas.AlertColumn.performance__first_meaningful_paint__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
    schemas.AlertColumn.performance__page_load_time__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"},
    schemas.AlertColumn.performance__dom_build_time__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(dom_building_time,0))"},
    schemas.AlertColumn.performance__speed_index__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"},
    schemas.AlertColumn.performance__page_response_time__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(response_time,0))"},
    schemas.AlertColumn.performance__ttfb__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(first_paint_time,0))"},
    schemas.AlertColumn.performance__time_to_render__average: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(visually_complete,0))"},
    schemas.AlertColumn.performance__image_load_time__average: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"},
    schemas.AlertColumn.performance__request_load_time__average: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"},
    schemas.AlertColumn.resources__load_time__average: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))"},
    schemas.AlertColumn.resources__missing__count: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE AND type='img'"},
    schemas.AlertColumn.errors__4xx_5xx__count: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
        "condition": "status/100!=2"},
    schemas.AlertColumn.errors__4xx__count: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(session_id)", "condition": "status/100=4"},
    schemas.AlertColumn.errors__5xx__count: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(session_id)", "condition": "status/100=5"},
    schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
    schemas.AlertColumn.performance__crashes__count: {
        "table": "public.sessions",
        "formula": "COUNT(DISTINCT session_id)",
        "condition": "errors_count > 0 AND duration>0"},
    schemas.AlertColumn.errors__javascript__count: {
        "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
    schemas.AlertColumn.errors__backend__count: {
        "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
}

# This is the frequency of execution for each threshold
TimeInterval = {
    15: 3,
    30: 5,
    60: 10,
    120: 20,
    240: 30,
    1440: 60,
}


def can_check(a) -> bool:
    now = TimeUTC.now()

    repetitionBase = a["options"]["currentPeriod"] \
        if a["detectionMethod"] == schemas.AlertDetectionMethod.change \
           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
        else a["options"]["previousPeriod"]

    if TimeInterval.get(repetitionBase) is None:
        logging.error(f"repetitionBase: {repetitionBase} NOT FOUND")
        return False

    return (a["options"]["renotifyInterval"] <= 0 or
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


def Build(a):
    now = TimeUTC.now()
    params = {"project_id": a["projectId"], "now": now}
    full_args = {}
    j_s = True
    main_table = ""
    if a["seriesId"] is not None:
        a["filter"]["sort"] = "session_id"
        a["filter"]["order"] = schemas.SortOrderType.desc
        a["filter"]["startDate"] = 0
        a["filter"]["endDate"] = TimeUTC.now()
        try:
            data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
        except ValidationError:
            logging.warning("Validation error for:")
            logging.warning(a["filter"])
            raise

        full_args, query_part = sessions.search_query_parts(data=data, error_status=None, errors_only=False,
                                                            issue=None, project_id=a["projectId"], user_id=None,
                                                            favorite_only=False)
        subQ = f"""SELECT COUNT(session_id) AS value
                   {query_part}"""
    else:
        colDef = LeftToDb[a["query"]["left"]]
        subQ = f"""SELECT {colDef["formula"]} AS value
                   FROM {colDef["table"]}
                   WHERE project_id = %(project_id)s
                         {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
        j_s = colDef.get("joinSessions", True)
        main_table = colDef["table"]
    is_ss = main_table == "public.sessions"
    q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""

    if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
        if a["seriesId"] is not None:
            q += f""" FROM ({subQ}) AS stat"""
        else:
            q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                            {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
        params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
    else:
        if a["change"] == schemas.AlertDetectionType.change:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})-({sub2})) AS value"
                q += f" FROM ( {sub1} ) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
            else:
                sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                           {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
                           {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
                sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
                q += f" FROM ( {sub1} ) AS stat"

        else:
            if a["seriesId"] is not None:
                sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
                sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"
                params = {**params, **full_args,
                          "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                          "timestamp_sub2": TimeUTC.now() \
                                            - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
                                            * 60 * 1000}
            else:
                sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                           {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
                           {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                params["timestamp_sub2"] = TimeUTC.now() \
                                           - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
                sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
                q += f" FROM ({sub1}) AS stat"

    return q, params


def process():
    notifications = []
    all_alerts = alerts_listener.get_all_alerts()
    with pg_client.PostgresClient() as cur:
        for alert in all_alerts:
            if can_check(alert):
                query, params = Build(alert)
                try:
                    query = cur.mogrify(query, params)
                except Exception as e:
                    logging.error(
                        f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
                    logging.error(e)
                    continue
                logging.debug(alert)
                logging.debug(query)
                try:
                    cur.execute(query)
                    result = cur.fetchone()
                    if result["valid"]:
                        logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
                        notifications.append(generate_notification(alert, result))
                except Exception as e:
                    logging.error(
                        f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
                    logging.error(query)
                    logging.error(e)
                    cur = cur.recreate(rollback=True)
        if len(notifications) > 0:
            cur.execute(
                cur.mogrify(f"""UPDATE public.alerts
                                SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                                WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
    if len(notifications) > 0:
        alerts.process_notifications(notifications)


def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"


def generate_notification(alert, result):
    left = __format_value(result['value'])
    right = __format_value(alert['query']['right'])
    return {
        "alertId": alert["alertId"],
        "tenantId": alert["tenantId"],
        "title": alert["name"],
        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
        "buttonText": "Check metrics for more details",
        "buttonUrl": f"/{alert['projectId']}/metrics",
        "imageUrl": None,
        "projectId": alert["projectId"],
        "projectName": alert["projectName"],
        "options": {"source": "ALERT", "sourceId": alert["alertId"],
                    "sourceMeta": alert["detectionMethod"],
                    "message": alert["options"]["message"], "projectId": alert["projectId"],
                    "data": {"title": alert["name"],
                             "limitValue": alert["query"]["right"],
                             "actualValue": float(result["value"]) \
                                 if isinstance(result["value"], decimal.Decimal) \
                                 else result["value"],
                             "operator": alert["query"]["operator"],
                             "trigger": alert["query"]["left"],
                             "alertId": alert["alertId"],
                             "detectionMethod": alert["detectionMethod"],
                             "currentPeriod": alert["options"]["currentPeriod"],
                             "previousPeriod": alert["options"]["previousPeriod"],
                             "createdAt": TimeUTC.now()}},
    }
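For a concrete sense of what Build produces, consider a hypothetical threshold alert on errors__4xx__count with operator '>' and right 100, and no seriesId. Following the branches above (is_ss is False, j_s is True), the returned statement is roughly:

# Approximate output of Build() for the hypothetical alert described above;
# the %(...)s placeholders are later filled in by cur.mogrify(query, params).
expected_sql = """
SELECT coalesce(value,0) AS value, coalesce(value,0) > 100 AS valid
 FROM (SELECT COUNT(session_id) AS value
       FROM events.resources INNER JOIN public.sessions USING(session_id)
       WHERE project_id = %(project_id)s
             AND status/100=4
             AND timestamp >= %(startDate)s AND timestamp <= %(now)s
             AND start_ts >= %(startDate)s AND start_ts <= %(now)s) AS stat
"""
# params would carry project_id, now, and startDate = now - currentPeriod * 60 * 1000.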
@@ -1,4 +1,3 @@
import logging
from os import access, R_OK
from os.path import exists as path_exists, getsize
@@ -11,10 +10,22 @@ import schemas
from chalicelib.core import projects
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)

ASSIST_KEY = config("ASSIST_KEY")
ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY
SESSION_PROJECTION_COLS = """s.project_id,
                             s.session_id::text AS session_id,
                             s.user_uuid,
                             s.user_id,
                             s.user_agent,
                             s.user_os,
                             s.user_browser,
                             s.user_device,
                             s.user_device_type,
                             s.user_country,
                             s.start_ts,
                             s.user_anonymous_id,
                             s.platform
                             """


def get_live_sessions_ws_user_id(project_id, user_id):
@@ -41,7 +52,7 @@ def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSche
        "sort": {"key": body.sort, "order": body.order}
    }
    for f in body.filters:
        if f.type == schemas.LiveFilterType.METADATA:
        if f.type == schemas.LiveFilterType.metadata:
            data["filter"][f.source] = {"values": f.value, "operator": f.operator}

        else:
@@ -55,21 +66,21 @@ def __get_live_sessions_ws(project_id, data):
        results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
                                json=data, timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
            logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
            logger.error(results.text)
            print(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
            print(results.text)
            return {"total": 0, "sessions": []}
        live_peers = results.json().get("data", [])
    except requests.exceptions.Timeout:
        logger.error("!! Timeout getting Assist response")
        print("!! Timeout getting Assist response")
        live_peers = {"total": 0, "sessions": []}
    except Exception as e:
        logger.error("!! Issue getting Live-Assist response")
        logger.exception(e)
        logger.error("expected JSON, received:")
        print("!! Issue getting Live-Assist response")
        print(str(e))
        print("expected JSON, received:")
        try:
            logger.error(results.text)
            print(results.text)
        except:
            logger.error("couldn't get response")
            print("couldn't get response")
        live_peers = {"total": 0, "sessions": []}
    _live_peers = live_peers
    if "sessions" in live_peers:
@@ -95,7 +106,7 @@ def __get_agent_token(project_id, project_key, session_id):
            "aud": f"openreplay:agent"
        },
        key=config("ASSIST_JWT_SECRET"),
        algorithm=config("JWT_ALGORITHM")
        algorithm=config("jwt_algorithm")
    )
@@ -105,8 +116,8 @@ def get_live_session_by_id(project_id, session_id):
        results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
            logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
            logger.error(results.text)
            print(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
            print(results.text)
            return None
        results = results.json().get("data")
        if results is None:
@@ -114,16 +125,16 @@ def get_live_session_by_id(project_id, session_id):
        results["live"] = True
        results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
    except requests.exceptions.Timeout:
        logger.error("!! Timeout getting Assist response")
        print("!! Timeout getting Assist response")
        return None
    except Exception as e:
        logger.error("!! Issue getting Assist response")
        logger.exception(e)
        logger.error("expected JSON, received:")
        print("!! Issue getting Assist response")
        print(str(e))
        print("expected JSON, received:")
        try:
            logger.error(results.text)
            print(results.text)
        except:
            logger.error("couldn't get response")
            print("couldn't get response")
        return None
    return results
@@ -135,21 +146,21 @@ def is_live(project_id, session_id, project_key=None):
        results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
            logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
            logger.error(results.text)
            print(f"!! issue with the peer-server code:{results.status_code} for is_live")
            print(results.text)
            return False
        results = results.json().get("data")
    except requests.exceptions.Timeout:
        logger.error("!! Timeout getting Assist response")
        print("!! Timeout getting Assist response")
        return False
    except Exception as e:
        logger.error("!! Issue getting Assist response")
        logger.exception(e)
        logger.error("expected JSON, received:")
        print("!! Issue getting Assist response")
        print(str(e))
        print("expected JSON, received:")
        try:
            logger.error(results.text)
            print(results.text)
        except:
            logger.error("couldn't get response")
            print("couldn't get response")
        return False
    return str(session_id) == results
@@ -164,27 +175,32 @@ def autocomplete(project_id, q: str, key: str = None):
            ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
            params=params, timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
            logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
            logger.error(results.text)
            print(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
            print(results.text)
            return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
        results = results.json().get("data", [])
    except requests.exceptions.Timeout:
        logger.error("!! Timeout getting Assist response")
        print("!! Timeout getting Assist response")
        return {"errors": ["Assist request timeout"]}
    except Exception as e:
        logger.error("!! Issue getting Assist response")
        logger.exception(e)
        logger.error("expected JSON, received:")
        print("!! Issue getting Assist response")
        print(str(e))
        print("expected JSON, received:")
        try:
            logger.error(results.text)
            print(results.text)
        except:
            logger.error("couldn't get response")
            print("couldn't get response")
        return {"errors": ["Something went wrong wile calling assist"]}
    for r in results:
        r["type"] = __change_keys(r["type"])
    return {"data": results}


def get_ice_servers():
    return config("iceServers") if config("iceServers", default=None) is not None \
                                   and len(config("iceServers")) > 0 else None


def __get_efs_path():
    efs_path = config("FS_DIR")
    if not path_exists(efs_path):
@@ -242,46 +258,46 @@ def session_exists(project_id, session_id):
        results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
            logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
            logger.error(results.text)
            print(f"!! issue with the peer-server code:{results.status_code} for session_exists")
            print(results.text)
            return None
        results = results.json().get("data")
        if results is None:
            return False
        return True
    except requests.exceptions.Timeout:
        logger.error("!! Timeout getting Assist response")
        print("!! Timeout getting Assist response")
        return False
    except Exception as e:
        logger.error("!! Issue getting Assist response")
        logger.exception(e)
        logger.error("expected JSON, received:")
        print("!! Issue getting Assist response")
        print(str(e))
        print("expected JSON, received:")
        try:
            logger.error(results.text)
            print(results.text)
        except:
            logger.error("couldn't get response")
            print("couldn't get response")
        return False


def __change_keys(key):
    return {
        "PAGETITLE": schemas.LiveFilterType.PAGE_TITLE.value,
        "PAGETITLE": schemas.LiveFilterType.page_title.value,
        "ACTIVE": "active",
        "LIVE": "live",
        "SESSIONID": schemas.LiveFilterType.SESSION_ID.value,
        "METADATA": schemas.LiveFilterType.METADATA.value,
        "USERID": schemas.LiveFilterType.USER_ID.value,
        "USERUUID": schemas.LiveFilterType.USER_UUID.value,
        "SESSIONID": schemas.LiveFilterType.session_id.value,
        "METADATA": schemas.LiveFilterType.metadata.value,
        "USERID": schemas.LiveFilterType.user_id.value,
        "USERUUID": schemas.LiveFilterType.user_UUID.value,
        "PROJECTKEY": "projectKey",
        "REVID": schemas.LiveFilterType.REV_ID.value,
        "REVID": schemas.LiveFilterType.rev_id.value,
        "TIMESTAMP": "timestamp",
        "TRACKERVERSION": schemas.LiveFilterType.TRACKER_VERSION.value,
        "TRACKERVERSION": schemas.LiveFilterType.tracker_version.value,
        "ISSNIPPET": "isSnippet",
        "USEROS": schemas.LiveFilterType.USER_OS.value,
        "USERBROWSER": schemas.LiveFilterType.USER_BROWSER.value,
        "USERBROWSERVERSION": schemas.LiveFilterType.USER_BROWSER_VERSION.value,
        "USERDEVICE": schemas.LiveFilterType.USER_DEVICE.value,
        "USERDEVICETYPE": schemas.LiveFilterType.USER_DEVICE_TYPE.value,
        "USERCOUNTRY": schemas.LiveFilterType.USER_COUNTRY.value,
        "USEROS": schemas.LiveFilterType.user_os.value,
        "USERBROWSER": schemas.LiveFilterType.user_browser.value,
        "USERBROWSERVERSION": schemas.LiveFilterType.user_browser_version.value,
        "USERDEVICE": schemas.LiveFilterType.user_device.value,
        "USERDEVICETYPE": schemas.LiveFilterType.user_device_type.value,
        "USERCOUNTRY": schemas.LiveFilterType.user_country.value,
        "PROJECTID": "projectId"
    }.get(key.upper(), key)
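The mapping above normalizes the peer-server's upper-cased field names; anything not in the table passes through unchanged thanks to .get(key.upper(), key). Using only the literal entries shown (the LiveFilterType enum values are defined elsewhere):

assert __change_keys("PROJECTKEY") == "projectKey"
assert __change_keys("timestamp") == "timestamp"          # matched after .upper()
assert __change_keys("somethingElse") == "somethingElse"  # unknown key kept as-is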
@@ -4,40 +4,30 @@ import jwt
from decouple import config

from chalicelib.core import tenants
from chalicelib.core import users, spot
from chalicelib.core import users
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)


def get_supported_audience():
    return [users.AUDIENCE, spot.AUDIENCE]


def is_spot_token(token: str) -> bool:
    try:
        decoded_token = jwt.decode(token, options={"verify_signature": False, "verify_exp": False})
        audience = decoded_token.get("aud")
        return audience == spot.AUDIENCE
    except jwt.InvalidTokenError:
        logger.error(f"Invalid token for is_spot_token: {token}")
        raise


def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
def jwt_authorizer(scheme: str, token: str, leeway=0):
    if scheme.lower() != "bearer":
        return None
    try:
        payload = jwt.decode(jwt=token,
                             key=config("JWT_SECRET") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
                             algorithms=config("JWT_ALGORITHM"),
                             audience=get_supported_audience(),
                             leeway=leeway)
        payload = jwt.decode(
            token,
            config("jwt_secret"),
            algorithms=config("jwt_algorithm"),
            audience=[f"front:{helper.get_stage_name()}"],
            leeway=leeway
        )
    except jwt.ExpiredSignatureError:
        logger.debug("! JWT Expired signature")
        return None
    except BaseException as e:
        logger.warning("! JWT Base Exception", exc_info=e)
        logger.warning("! JWT Base Exception")
        logger.debug(e)
        return None
    return payload
@@ -46,51 +36,62 @@ def jwt_refresh_authorizer(scheme: str, token: str):
    if scheme.lower() != "bearer":
        return None
    try:
        payload = jwt.decode(jwt=token,
                             key=config("JWT_REFRESH_SECRET") if not is_spot_token(token) \
                                 else config("JWT_SPOT_REFRESH_SECRET"),
                             algorithms=config("JWT_ALGORITHM"),
                             audience=get_supported_audience())
        payload = jwt.decode(
            token,
            config("JWT_REFRESH_SECRET"),
            algorithms=config("jwt_algorithm"),
            audience=[f"front:{helper.get_stage_name()}"]
        )
    except jwt.ExpiredSignatureError:
        logger.debug("! JWT-refresh Expired signature")
        return None
    except BaseException as e:
        logger.error("! JWT-refresh Base Exception", exc_info=e)
        logger.warning("! JWT-refresh Base Exception")
        logger.debug(e)
        return None
    return payload


def generate_jwt(user_id, tenant_id, iat, aud, for_spot=False):
def jwt_context(context):
    user = users.get(user_id=context["userId"], tenant_id=context["tenantId"])
    if user is None:
        return None
    return {
        "tenantId": context["tenantId"],
        "userId": context["userId"],
        **user
    }


def generate_jwt(user_id, tenant_id, iat, aud):
    token = jwt.encode(
        payload={
            "userId": user_id,
            "tenantId": tenant_id,
            "exp": iat + (config("JWT_EXPIRATION", cast=int) if not for_spot
                          else config("JWT_SPOT_EXPIRATION", cast=int)),
            "exp": iat + config("JWT_EXPIRATION", cast=int),
            "iss": config("JWT_ISSUER"),
            "iat": iat,
            "aud": aud
        },
        key=config("JWT_SECRET") if not for_spot else config("JWT_SPOT_SECRET"),
        algorithm=config("JWT_ALGORITHM")
        key=config("jwt_secret"),
        algorithm=config("jwt_algorithm")
    )
    return token


def generate_jwt_refresh(user_id, tenant_id, iat, aud, jwt_jti, for_spot=False):
def generate_jwt_refresh(user_id, tenant_id, iat, aud, jwt_jti):
    token = jwt.encode(
        payload={
            "userId": user_id,
            "tenantId": tenant_id,
            "exp": iat + (config("JWT_REFRESH_EXPIRATION", cast=int) if not for_spot
                          else config("JWT_SPOT_REFRESH_EXPIRATION", cast=int)),
            "exp": iat + config("JWT_REFRESH_EXPIRATION", cast=int),
            "iss": config("JWT_ISSUER"),
            "iat": iat,
            "aud": aud,
            "jti": jwt_jti
        },
        key=config("JWT_REFRESH_SECRET") if not for_spot else config("JWT_SPOT_REFRESH_SECRET"),
        algorithm=config("JWT_ALGORITHM")
        key=config("JWT_REFRESH_SECRET"),
        algorithm=config("jwt_algorithm")
    )
    return token
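Both versions of these helpers sit on top of PyJWT. A self-contained round-trip with the same claim layout (the secret, issuer, and audience below are placeholders, not real configuration values):

import time

import jwt  # PyJWT

SECRET = "change-me"        # placeholder secret, never hard-code one in practice
iat = int(time.time())

token = jwt.encode(
    payload={"userId": 1, "tenantId": 1,
             "exp": iat + 3600, "iss": "openreplay", "iat": iat,
             "aud": "front:example"},
    key=SECRET, algorithm="HS256")

# decode() verifies signature, expiry, and audience, like jwt_authorizer above.
payload = jwt.decode(token, SECRET, algorithms=["HS256"], audience="front:example")
assert payload["userId"] == 1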
331 api/chalicelib/core/autocomplete.py Normal file
@@ -0,0 +1,331 @@
import schemas
from chalicelib.core import countries, events, metadata
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.event_filter_definition import Event

TABLE = "public.autocomplete"


def __get_autocomplete_table(value, project_id):
    autocomplete_events = [schemas.FilterType.rev_id,
                           schemas.EventType.click,
                           schemas.FilterType.user_device,
                           schemas.FilterType.user_id,
                           schemas.FilterType.user_browser,
                           schemas.FilterType.user_os,
                           schemas.EventType.custom,
                           schemas.FilterType.user_country,
                           schemas.FilterType.user_city,
                           schemas.FilterType.user_state,
                           schemas.EventType.location,
                           schemas.EventType.input]
    autocomplete_events.sort()
    sub_queries = []
    c_list = []
    for e in autocomplete_events:
        if e == schemas.FilterType.user_country:
            c_list = countries.get_country_code_autocomplete(value)
            if len(c_list) > 0:
                sub_queries.append(f"""(SELECT DISTINCT ON(value) '{e.value}' AS _type, value
                                        FROM {TABLE}
                                        WHERE project_id = %(project_id)s
                                          AND type= '{e.value.upper()}'
                                          AND value IN %(c_list)s)""")
            continue
        sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
                                FROM {TABLE}
                                WHERE project_id = %(project_id)s
                                  AND type= '{e.value.upper()}'
                                  AND value ILIKE %(svalue)s
                                ORDER BY value
                                LIMIT 5)""")
        if len(value) > 2:
            sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
                                    FROM {TABLE}
                                    WHERE project_id = %(project_id)s
                                      AND type= '{e.value.upper()}'
                                      AND value ILIKE %(value)s
                                    ORDER BY value
                                    LIMIT 5)""")
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(" UNION DISTINCT ".join(sub_queries) + ";",
                            {"project_id": project_id,
                             "value": helper.string_to_sql_like(value),
                             "svalue": helper.string_to_sql_like("^" + value),
                             "c_list": tuple(c_list)
                             })
        try:
            cur.execute(query)
        except Exception as err:
            print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
            print(query.decode('UTF-8'))
            print("--------- VALUE -----------")
            print(value)
            print("--------------------")
            raise err
        results = cur.fetchall()
    for r in results:
        r["type"] = r.pop("_type")
    results = helper.list_to_camel_case(results)
    return results


def __generic_query(typename, value_length=None):
    if typename == schemas.FilterType.user_country:
        return f"""SELECT DISTINCT value, type
                   FROM {TABLE}
                   WHERE
                         project_id = %(project_id)s
                         AND type='{typename.upper()}'
                         AND value IN %(value)s
                   ORDER BY value"""

    if value_length is None or value_length > 2:
        return f"""(SELECT DISTINCT value, type
                    FROM {TABLE}
                    WHERE
                          project_id = %(project_id)s
                          AND type='{typename.upper()}'
                          AND value ILIKE %(svalue)s
                    ORDER BY value
                    LIMIT 5)
                   UNION DISTINCT
                   (SELECT DISTINCT value, type
                    FROM {TABLE}
                    WHERE
                          project_id = %(project_id)s
                          AND type='{typename.upper()}'
                          AND value ILIKE %(value)s
                    ORDER BY value
                    LIMIT 5);"""
    return f"""SELECT DISTINCT value, type
               FROM {TABLE}
               WHERE
                     project_id = %(project_id)s
                     AND type='{typename.upper()}'
                     AND value ILIKE %(svalue)s
               ORDER BY value
               LIMIT 10;"""


def __generic_autocomplete(event: Event):
    def f(project_id, value, key=None, source=None):
        with pg_client.PostgresClient() as cur:
            query = __generic_query(event.ui_type, value_length=len(value))
            params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
                      "svalue": helper.string_to_sql_like("^" + value)}
            cur.execute(cur.mogrify(query, params))
            return helper.list_to_camel_case(cur.fetchall())

    return f


def __generic_autocomplete_metas(typename):
    def f(project_id, text):
        with pg_client.PostgresClient() as cur:
            params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
                      "svalue": helper.string_to_sql_like("^" + text)}

            if typename == schemas.FilterType.user_country:
                params["value"] = tuple(countries.get_country_code_autocomplete(text))
                if len(params["value"]) == 0:
                    return []

            query = cur.mogrify(__generic_query(typename, value_length=len(text)), params)
            cur.execute(query)
            rows = cur.fetchall()
        return rows

    return f


def __errors_query(source=None, value_length=None):
    if value_length is None or value_length > 2:
        return f"""((SELECT DISTINCT ON(lg.message)
                         lg.message AS value,
                         source,
                         '{events.EventType.ERROR.ui_type}' AS type
                     FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.message ILIKE %(svalue)s
                           AND lg.project_id = %(project_id)s
                           {"AND source = %(source)s" if source is not None else ""}
                     LIMIT 5)
                    UNION DISTINCT
                    (SELECT DISTINCT ON(lg.name)
                         lg.name AS value,
                         source,
                         '{events.EventType.ERROR.ui_type}' AS type
                     FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.name ILIKE %(svalue)s
                           AND lg.project_id = %(project_id)s
                           {"AND source = %(source)s" if source is not None else ""}
                     LIMIT 5)
                    UNION DISTINCT
                    (SELECT DISTINCT ON(lg.message)
                         lg.message AS value,
                         source,
                         '{events.EventType.ERROR.ui_type}' AS type
                     FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.message ILIKE %(value)s
                           AND lg.project_id = %(project_id)s
                           {"AND source = %(source)s" if source is not None else ""}
                     LIMIT 5)
                    UNION DISTINCT
                    (SELECT DISTINCT ON(lg.name)
                         lg.name AS value,
                         source,
                         '{events.EventType.ERROR.ui_type}' AS type
                     FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.name ILIKE %(value)s
                           AND lg.project_id = %(project_id)s
                           {"AND source = %(source)s" if source is not None else ""}
                     LIMIT 5));"""
    return f"""((SELECT DISTINCT ON(lg.message)
                     lg.message AS value,
                     source,
                     '{events.EventType.ERROR.ui_type}' AS type
                 FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                 WHERE
                       s.project_id = %(project_id)s
                       AND lg.message ILIKE %(svalue)s
                       AND lg.project_id = %(project_id)s
                       {"AND source = %(source)s" if source is not None else ""}
                 LIMIT 5)
                UNION DISTINCT
                (SELECT DISTINCT ON(lg.name)
                     lg.name AS value,
                     source,
                     '{events.EventType.ERROR.ui_type}' AS type
                 FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
                 WHERE
                       s.project_id = %(project_id)s
                       AND lg.name ILIKE %(svalue)s
                       AND lg.project_id = %(project_id)s
                       {"AND source = %(source)s" if source is not None else ""}
                 LIMIT 5));"""


def __search_errors(project_id, value, key=None, source=None):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(__errors_query(source,
                                       value_length=len(value)),
                        {"project_id": project_id, "value": helper.string_to_sql_like(value),
                         "svalue": helper.string_to_sql_like("^" + value),
                         "source": source}))
        results = helper.list_to_camel_case(cur.fetchall())
    return results


def __search_errors_ios(project_id, value, key=None, source=None):
    if len(value) > 2:
        query = f"""(SELECT DISTINCT ON(lg.reason)
                         lg.reason AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.reason ILIKE %(svalue)s
                     LIMIT 5)
                    UNION ALL
                    (SELECT DISTINCT ON(lg.name)
                         lg.name AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.name ILIKE %(svalue)s
                     LIMIT 5)
                    UNION ALL
                    (SELECT DISTINCT ON(lg.reason)
                         lg.reason AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.reason ILIKE %(value)s
                     LIMIT 5)
                    UNION ALL
                    (SELECT DISTINCT ON(lg.name)
                         lg.name AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.name ILIKE %(value)s
                     LIMIT 5);"""
    else:
        query = f"""(SELECT DISTINCT ON(lg.reason)
                         lg.reason AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.reason ILIKE %(svalue)s
                     LIMIT 5)
                    UNION ALL
                    (SELECT DISTINCT ON(lg.name)
                         lg.name AS value,
                         '{events.EventType.CRASH_IOS.ui_type}' AS type
                     FROM {events.EventType.CRASH_IOS.table} INNER JOIN public.crashes_ios AS lg USING (crash_id) LEFT JOIN public.sessions AS s USING(session_id)
                     WHERE
                           s.project_id = %(project_id)s
                           AND lg.project_id = %(project_id)s
                           AND lg.name ILIKE %(svalue)s
                     LIMIT 5);"""
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(query, {"project_id": project_id, "value": helper.string_to_sql_like(value),
                                        "svalue": helper.string_to_sql_like("^" + value)}))
        results = helper.list_to_camel_case(cur.fetchall())
    return results


def __search_metadata(project_id, value, key=None, source=None):
    meta_keys = metadata.get(project_id=project_id)
    meta_keys = {m["key"]: m["index"] for m in meta_keys}
    if len(meta_keys) == 0 or key is not None and key not in meta_keys.keys():
        return []
    sub_from = []
    if key is not None:
        meta_keys = {key: meta_keys[key]}

    for k in meta_keys.keys():
        colname = metadata.index_to_colname(meta_keys[k])
        if len(value) > 2:
            sub_from.append(f"""((SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(svalue)s LIMIT 5)
                                 UNION
                                 (SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(value)s LIMIT 5))
                             """)
        else:
            sub_from.append(f"""(SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                 FROM public.sessions
                                 WHERE project_id = %(project_id)s
                                   AND {colname} ILIKE %(svalue)s LIMIT 5)""")
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(f"""\
                SELECT key, value, 'METADATA' AS TYPE
                FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                              "svalue": helper.string_to_sql_like("^" + value)}))
        results = helper.list_to_camel_case(cur.fetchall())
    return results
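As a quick illustration of __generic_query's branching above: a term of one or two characters gets a single prefix-anchored ILIKE with LIMIT 10, while longer terms union a prefix match with a contains match, five rows each (the type name below is illustrative, and calling the module-private helper directly is for demonstration only):

short_q = __generic_query("USER_BROWSER", value_length=2)
long_q = __generic_query("USER_BROWSER", value_length=5)
assert "LIMIT 10" in short_q          # single prefix-only query
assert "UNION DISTINCT" in long_q     # prefix match + contains match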
@ -1,439 +0,0 @@
|
|||
import logging
|
||||
import schemas
|
||||
from chalicelib.core import countries, events, metadata
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.event_filter_definition import Event
|
||||
from chalicelib.utils.or_cache import CachedResponse
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
TABLE = "public.autocomplete"
|
||||
|
||||
|
||||
def __get_autocomplete_table(value, project_id):
|
||||
autocomplete_events = [schemas.FilterType.REV_ID,
|
||||
schemas.EventType.CLICK,
|
||||
schemas.FilterType.USER_DEVICE,
|
||||
schemas.FilterType.USER_ID,
|
||||
schemas.FilterType.USER_BROWSER,
|
||||
schemas.FilterType.USER_OS,
|
||||
schemas.EventType.CUSTOM,
|
||||
schemas.FilterType.USER_COUNTRY,
|
||||
schemas.FilterType.USER_CITY,
|
||||
schemas.FilterType.USER_STATE,
|
||||
schemas.EventType.LOCATION,
|
||||
schemas.EventType.INPUT]
|
||||
autocomplete_events.sort()
|
||||
sub_queries = []
|
||||
c_list = []
|
||||
for e in autocomplete_events:
|
||||
if e == schemas.FilterType.USER_COUNTRY:
|
||||
c_list = countries.get_country_code_autocomplete(value)
|
||||
if len(c_list) > 0:
|
||||
sub_queries.append(f"""(SELECT DISTINCT ON(value) '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value IN %(c_list)s)""")
|
||||
continue
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
if len(value) > 2:
|
||||
sub_queries.append(f"""(SELECT '{e.value}' AS _type, value
|
||||
FROM {TABLE}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND type= '{e.value.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5)""")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(" UNION DISTINCT ".join(sub_queries) + ";",
|
||||
{"project_id": project_id,
|
||||
"value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"c_list": tuple(c_list)
|
||||
})
|
||||
try:
|
||||
cur.execute(query)
|
||||
except Exception as err:
|
||||
logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
|
||||
logger.exception(query.decode('UTF-8'))
|
||||
logger.exception("--------- VALUE -----------")
|
||||
logger.exception(value)
|
||||
logger.exception("--------------------")
|
||||
raise err
|
||||
results = cur.fetchall()
|
||||
for r in results:
|
||||
r["type"] = r.pop("_type")
|
||||
results = helper.list_to_camel_case(results)
|
||||
return results
|
||||
|
||||
|
||||
def __generic_query(typename, value_length=None):
|
||||
if typename == schemas.FilterType.USER_COUNTRY:
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value IN %(value)s
|
||||
ORDER BY value"""
|
||||
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""SELECT DISTINCT ON(value,type) value, type
|
||||
((SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5)) AS raw;"""
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(svalue)s
|
||||
ORDER BY value
|
||||
LIMIT 10;"""
|
||||
|
||||
|
||||
def __generic_autocomplete(event: Event):
|
||||
def f(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = __generic_query(event.ui_type, value_length=len(value))
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}
|
||||
cur.execute(cur.mogrify(query, params))
|
||||
return helper.list_to_camel_case(cur.fetchall())
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def generic_autocomplete_metas(typename):
|
||||
def f(project_id, text):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
|
||||
"svalue": helper.string_to_sql_like("^" + text)}
|
||||
|
||||
if typename == schemas.FilterType.USER_COUNTRY:
|
||||
params["value"] = tuple(countries.get_country_code_autocomplete(text))
|
||||
if len(params["value"]) == 0:
|
||||
return []
|
||||
|
||||
query = cur.mogrify(__generic_query(typename, value_length=len(text)), params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
return rows
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def __errors_query(source=None, value_length=None):
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
return f"""((SELECT DISTINCT ON(lg.message)
|
||||
lg.message AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.message ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5)
|
||||
UNION DISTINCT
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
source,
|
||||
'{events.EventType.ERROR.ui_type}' AS type
|
||||
FROM {events.EventType.ERROR.table} INNER JOIN public.errors AS lg USING (error_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
{"AND source = %(source)s" if source is not None else ""}
|
||||
LIMIT 5));"""
|
||||
|
||||
|
||||
def __search_errors(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(__errors_query(source,
|
||||
value_length=len(value)),
|
||||
{"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value),
|
||||
"source": source}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
||||
def __search_errors_mobile(project_id, value, key=None, source=None):
|
||||
if len(value) > 2:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(value)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(value)s
|
||||
LIMIT 5);"""
|
||||
else:
|
||||
query = f"""(SELECT DISTINCT ON(lg.reason)
|
||||
lg.reason AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.reason ILIKE %(svalue)s
|
||||
LIMIT 5)
|
||||
UNION ALL
|
||||
(SELECT DISTINCT ON(lg.name)
|
||||
lg.name AS value,
|
||||
'{events.EventType.CRASH_MOBILE.ui_type}' AS type
|
||||
FROM {events.EventType.CRASH_MOBILE.table} INNER JOIN public.crashes_ios AS lg USING (crash_ios_id) LEFT JOIN public.sessions AS s USING(session_id)
|
||||
WHERE
|
||||
s.project_id = %(project_id)s
|
||||
AND lg.project_id = %(project_id)s
|
||||
AND lg.name ILIKE %(svalue)s
|
||||
LIMIT 5);"""
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(query, {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}))
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||

def __search_metadata(project_id, value, key=None, source=None):
    meta_keys = metadata.get(project_id=project_id)
    meta_keys = {m["key"]: m["index"] for m in meta_keys}
    if len(meta_keys) == 0 or key is not None and key not in meta_keys.keys():
        return []
    sub_from = []
    if key is not None:
        meta_keys = {key: meta_keys[key]}

    for k in meta_keys.keys():
        colname = metadata.index_to_colname(meta_keys[k])
        if len(value) > 2:
            sub_from.append(f"""((SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(svalue)s LIMIT 5)
                                 UNION
                                 (SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                  FROM public.sessions
                                  WHERE project_id = %(project_id)s
                                    AND {colname} ILIKE %(value)s LIMIT 5))
                             """)
        else:
            sub_from.append(f"""(SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
                                 FROM public.sessions
                                 WHERE project_id = %(project_id)s
                                   AND {colname} ILIKE %(svalue)s LIMIT 5)""")
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(f"""\
            SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
            FROM({" UNION ALL ".join(sub_from)}) AS all_metas
            LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                          "svalue": helper.string_to_sql_like("^" + value)}))
        results = helper.list_to_camel_case(cur.fetchall())
    return results
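Each metadata key lives in its own numbered column of public.sessions, and metadata.index_to_colname maps a key's index to that column. A sketch of the assumed mapping, consistent with the metadata_1 .. metadata_10 columns checked elsewhere in this changeset (illustration, not the real metadata module):

# Assumed mapping: metadata index N is stored in sessions.metadata_N (N = 1..10).
def index_to_colname(index: int) -> str:
    return f"metadata_{index}"

assert index_to_colname(3) == "metadata_3"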

TYPE_TO_COLUMN = {
    schemas.EventType.CLICK: "label",
    schemas.EventType.INPUT: "label",
    schemas.EventType.LOCATION: "path",
    schemas.EventType.CUSTOM: "name",
    schemas.FetchFilterType.FETCH_URL: "path",
    schemas.GraphqlFilterType.GRAPHQL_NAME: "name",
    schemas.EventType.STATE_ACTION: "name",
    # For ERROR, sessions search is happening over name OR message,
    # for simplicity top 10 is using name only
    schemas.EventType.ERROR: "name",
    schemas.FilterType.USER_COUNTRY: "user_country",
    schemas.FilterType.USER_CITY: "user_city",
    schemas.FilterType.USER_STATE: "user_state",
    schemas.FilterType.USER_ID: "user_id",
    schemas.FilterType.USER_ANONYMOUS_ID: "user_anonymous_id",
    schemas.FilterType.USER_OS: "user_os",
    schemas.FilterType.USER_BROWSER: "user_browser",
    schemas.FilterType.USER_DEVICE: "user_device",
    schemas.FilterType.PLATFORM: "platform",
    schemas.FilterType.REV_ID: "rev_id",
    schemas.FilterType.REFERRER: "referrer",
    schemas.FilterType.UTM_SOURCE: "utm_source",
    schemas.FilterType.UTM_MEDIUM: "utm_medium",
    schemas.FilterType.UTM_CAMPAIGN: "utm_campaign",
}

TYPE_TO_TABLE = {
    schemas.EventType.CLICK: "events.clicks",
    schemas.EventType.INPUT: "events.inputs",
    schemas.EventType.LOCATION: "events.pages",
    schemas.EventType.CUSTOM: "events_common.customs",
    schemas.FetchFilterType.FETCH_URL: "events_common.requests",
    schemas.GraphqlFilterType.GRAPHQL_NAME: "events.graphql",
    schemas.EventType.STATE_ACTION: "events.state_actions",
}


def is_top_supported(event_type):
    return TYPE_TO_COLUMN.get(event_type, False)
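Note that is_top_supported returns the mapped column name rather than a strict boolean: supported types yield a truthy string, unsupported types yield False. Illustration (assumes the schemas module above):

# Truthy column name for supported types, False otherwise:
assert is_top_supported(schemas.EventType.CLICK) == "label"
assert is_top_supported("not-an-event-type") is False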

@CachedResponse(table="or_cache.autocomplete_top_values", ttl=5 * 60)
def get_top_values(project_id, event_type, event_key=None):
    with pg_client.PostgresClient() as cur:
        if schemas.FilterType.has_value(event_type):
            if event_type == schemas.FilterType.METADATA \
                    and (event_key is None \
                         or (colname := metadata.get_colname_by_key(project_id=project_id, key=event_key)) is None) \
                    or event_type != schemas.FilterType.METADATA \
                    and (colname := TYPE_TO_COLUMN.get(event_type)) is None:
                return []

            query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
                                            COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
                                            COUNT(1) OVER () AS total_count
                                     FROM public.sessions
                                     WHERE project_id = %(project_id)s
                                       AND {colname} IS NOT NULL
                                       AND sessions.duration IS NOT NULL
                                       AND sessions.duration > 0
                                     ORDER BY row_count DESC
                                     LIMIT 10)
                        SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count, 2) AS row_percentage
                        FROM raw;"""
        elif event_type == schemas.EventType.ERROR:
            colname = TYPE_TO_COLUMN.get(event_type)
            query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
                                            COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
                                            COUNT(1) OVER () AS total_count
                                     FROM public.errors
                                     WHERE project_id = %(project_id)s
                                       AND {colname} IS NOT NULL
                                       AND {colname} != ''
                                     ORDER BY row_count DESC
                                     LIMIT 10)
                        SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count,2) AS row_percentage
                        FROM raw;"""
        else:
            colname = TYPE_TO_COLUMN.get(event_type)
            table = TYPE_TO_TABLE.get(event_type)
            query = f"""WITH raw AS (SELECT DISTINCT {colname} AS c_value,
                                            COUNT(1) OVER (PARTITION BY {colname}) AS row_count,
                                            COUNT(1) OVER () AS total_count
                                     FROM {table} INNER JOIN public.sessions USING(session_id)
                                     WHERE project_id = %(project_id)s
                                       AND {colname} IS NOT NULL
                                       AND {colname} != ''
                                       AND sessions.duration IS NOT NULL
                                       AND sessions.duration > 0
                                     ORDER BY row_count DESC
                                     LIMIT 10)
                        SELECT c_value AS value, row_count, trunc(row_count * 100 / total_count,2) AS row_percentage
                        FROM raw;"""
        params = {"project_id": project_id}
        query = cur.mogrify(query, params)
        logger.debug("--------------------")
        logger.debug(query)
        logger.debug("--------------------")
        cur.execute(query=query)
        results = cur.fetchall()
    return helper.list_to_camel_case(results)
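The CTE leans on two window functions: COUNT(1) OVER (PARTITION BY col) attaches each distinct value's frequency to its row, while COUNT(1) OVER () attaches the grand total, so the outer SELECT can turn counts into percentages in one pass. The same computation reduced to plain Python (illustration only; the SQL uses trunc and integer math, so rounding can differ slightly):

# What the window functions compute, in plain Python:
from collections import Counter

values = ["Chrome", "Chrome", "Firefox", "Chrome", "Safari"]
counts = Counter(values)                     # COUNT(1) OVER (PARTITION BY col)
total = sum(counts.values())                 # COUNT(1) OVER ()
top = counts.most_common(10)                 # ORDER BY row_count DESC LIMIT 10
rows = [{"value": v, "rowCount": c, "rowPercentage": round(c * 100 / total, 2)}
        for v, c in top]
assert rows[0] == {"value": "Chrome", "rowCount": 3, "rowPercentage": 60.0}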

@@ -1,8 +1,7 @@
-from chalicelib.core import projects
-from chalicelib.core import users
-from chalicelib.core.log_tools import datadog, stackdriver, sentry
-from chalicelib.core.modules import TENANT_CONDITION
-from chalicelib.utils import pg_client
+from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
+from chalicelib.core import users


 def get_state(tenant_id):
@@ -13,61 +12,47 @@ def get_state(tenant_id):

         if len(pids) > 0:
             cur.execute(
-                cur.mogrify(
-                    """SELECT EXISTS(( SELECT 1
+                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                        FROM public.sessions AS s
                                        WHERE s.project_id IN %(ids)s)) AS exists;""",
-                    {"ids": tuple(pids)},
-                )
+                            {"ids": tuple(pids)})
             )
             recorded = cur.fetchone()["exists"]
         meta = False
         if recorded:
-            query = cur.mogrify(
-                f"""SELECT EXISTS((SELECT 1
+            cur.execute("""SELECT EXISTS((SELECT 1
                                    FROM public.projects AS p
                                        LEFT JOIN LATERAL ( SELECT 1
                                                            FROM public.sessions
                                                            WHERE sessions.project_id = p.project_id
                                                              AND sessions.user_id IS NOT NULL
                                                            LIMIT 1) AS sessions(user_id) ON (TRUE)
-                                   WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
+                                   WHERE p.deleted_at ISNULL
                                      AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                                          OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                                          OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                                          OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                                          OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                                          OR p.metadata_10 IS NOT NULL )
-                )) AS exists;""",
-                {"tenant_id": tenant_id},
-            )
-            cur.execute(query)
+                )) AS exists;""")

             meta = cur.fetchone()["exists"]

     return [
-        {
-            "task": "Install OpenReplay",
-            "done": recorded,
-            "URL": "https://docs.openreplay.com/getting-started/quick-start",
-        },
-        {
-            "task": "Identify Users",
-            "done": meta,
-            "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-        },
-        {
-            "task": "Invite Team Members",
-            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-            "URL": "https://app.openreplay.com/client/manage-users",
-        },
-        {
-            "task": "Integrations",
-            "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-            or len(sentry.get_all(tenant_id=tenant_id)) > 0
-            or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-            "URL": "https://docs.openreplay.com/integrations",
-        },
+        {"task": "Install OpenReplay",
+         "done": recorded,
+         "URL": "https://docs.openreplay.com/getting-started/quick-start"},
+        {"task": "Identify Users",
+         "done": meta,
+         "URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
+        {"task": "Invite Team Members",
+         "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+         "URL": "https://app.openreplay.com/client/manage-users"},
+        {"task": "Integrations",
+         "done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
+                 or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
+                 or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
+         "URL": "https://docs.openreplay.com/integrations"}
     ]


@@ -78,66 +63,52 @@ def get_state_installing(tenant_id):

         if len(pids) > 0:
             cur.execute(
-                cur.mogrify(
-                    """SELECT EXISTS(( SELECT 1
+                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                        FROM public.sessions AS s
                                        WHERE s.project_id IN %(ids)s)) AS exists;""",
-                    {"ids": tuple(pids)},
-                )
+                            {"ids": tuple(pids)})
             )
             recorded = cur.fetchone()["exists"]

-    return {
-        "task": "Install OpenReplay",
-        "done": recorded,
-        "URL": "https://docs.openreplay.com/getting-started/quick-start",
-    }
+    return {"task": "Install OpenReplay",
+            "done": recorded,
+            "URL": "https://docs.openreplay.com/getting-started/quick-start"}


 def get_state_identify_users(tenant_id):
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            f"""SELECT EXISTS((SELECT 1
+        cur.execute("""SELECT EXISTS((SELECT 1
                                FROM public.projects AS p
                                    LEFT JOIN LATERAL ( SELECT 1
                                                        FROM public.sessions
                                                        WHERE sessions.project_id = p.project_id
                                                          AND sessions.user_id IS NOT NULL
                                                        LIMIT 1) AS sessions(user_id) ON (TRUE)
-                               WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
+                               WHERE p.deleted_at ISNULL
                                  AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                                      OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                                      OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                                      OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                                      OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                                      OR p.metadata_10 IS NOT NULL )
-            )) AS exists;""",
-            {"tenant_id": tenant_id},
-        )
-        cur.execute(query)
+            )) AS exists;""")

         meta = cur.fetchone()["exists"]

-    return {
-        "task": "Identify Users",
-        "done": meta,
-        "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-    }
+    return {"task": "Identify Users",
+            "done": meta,
+            "URL": "https://docs.openreplay.com/data-privacy-security/metadata"}


 def get_state_manage_users(tenant_id):
-    return {
-        "task": "Invite Team Members",
-        "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-        "URL": "https://app.openreplay.com/client/manage-users",
-    }
+    return {"task": "Invite Team Members",
+            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+            "URL": "https://app.openreplay.com/client/manage-users"}


 def get_state_integrations(tenant_id):
-    return {
-        "task": "Integrations",
-        "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-        or len(sentry.get_all(tenant_id=tenant_id)) > 0
-        or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-        "URL": "https://docs.openreplay.com/integrations",
-    }
+    return {"task": "Integrations",
+            "done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
+                    or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
+                    or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
+            "URL": "https://docs.openreplay.com/integrations"}

@@ -1,4 +1,4 @@
-from chalicelib.utils import pg_client
+from chalicelib.utils import pg_client, helper
 from chalicelib.utils.storage import StorageClient
 from decouple import config


@@ -13,23 +13,17 @@ def get_canvas_presigned_urls(session_id, project_id):
                         {"project_id": project_id, "session_id": session_id})
         )
         rows = cur.fetchall()
-        urls = []
-
         for i in range(len(rows)):
             params = {
                 "sessionId": session_id,
                 "projectId": project_id,
                 "recordingId": rows[i]["recording_id"]
             }
-            oldKey = "%(sessionId)s/%(recordingId)s.mp4" % params
-            key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.tar.zst") % params
-            urls.append(StorageClient.get_presigned_url_for_sharing(
+            key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.mp4") % params
+            rows[i] = StorageClient.get_presigned_url_for_sharing(
                 bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
                 expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
                 key=key
-            ))
-            urls.append(StorageClient.get_presigned_url_for_sharing(
-                bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
-                expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
-                key=oldKey
-            ))
-        return urls
+            )
+        return rows
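CANVAS_PATTERN is expanded with printf-style named parameters, so every %(name)s placeholder needs its trailing s conversion character; a bare %(sessionId)/ raises ValueError: unsupported format character at run time, which is why the default pattern is written in the full %(sessionId)s form. Illustration:

# printf-style expansion of the canvas key pattern:
params = {"sessionId": 42, "projectId": 1, "recordingId": 7}
assert "%(sessionId)s/%(recordingId)s.mp4" % params == "42/7.mp4"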

77 api/chalicelib/core/click_maps.py Normal file
@@ -0,0 +1,77 @@
import schemas
from chalicelib.core import sessions_mobs, sessions_legacy as sessions_search, events
from chalicelib.utils import pg_client, helper

SESSION_PROJECTION_COLS = """s.project_id,
                             s.session_id::text AS session_id,
                             s.user_uuid,
                             s.user_id,
                             s.user_os,
                             s.user_browser,
                             s.user_device,
                             s.user_device_type,
                             s.user_country,
                             s.start_ts,
                             s.duration,
                             s.events_count,
                             s.pages_count,
                             s.errors_count,
                             s.user_anonymous_id,
                             s.platform,
                             s.issue_score,
                             to_jsonb(s.issue_types) AS issue_types,
                             favorite_sessions.session_id NOTNULL AS favorite,
                             COALESCE((SELECT TRUE
                                       FROM public.user_viewed_sessions AS fs
                                       WHERE s.session_id = fs.session_id
                                         AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """


def search_short_session(data: schemas.ClickMapSessionsSearch, project_id, user_id, include_mobs: bool = True):
    no_platform = True
    for f in data.filters:
        if f.type == schemas.FilterType.platform:
            no_platform = False
            break
    if no_platform:
        data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.platform,
                                                              value=[schemas.PlatformType.desktop],
                                                              operator=schemas.SearchEventOperator._is))

    full_args, query_part = sessions_search.search_query_parts(data=data, error_status=None, errors_only=False,
                                                               favorite_only=data.bookmarked, issue=None,
                                                               project_id=project_id, user_id=user_id)

    with pg_client.PostgresClient() as cur:
        data.order = schemas.SortOrderType.desc
        data.sort = 'duration'

        # meta_keys = metadata.get(project_id=project_id)
        meta_keys = []
        main_query = cur.mogrify(f"""SELECT {SESSION_PROJECTION_COLS}
                                     {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
                                     {query_part}
                                     ORDER BY {data.sort} {data.order.value}
                                     LIMIT 1;""", full_args)
        # print("--------------------")
        # print(main_query)
        # print("--------------------")
        try:
            cur.execute(main_query)
        except Exception as err:
            print("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION -----------")
            print(main_query.decode('UTF-8'))
            print("--------- PAYLOAD -----------")
            print(data.model_dump_json())
            print("--------------------")
            raise err

        session = cur.fetchone()
        if session:
            if include_mobs:
                session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
                session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
            session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
                                                         event_type=schemas.EventType.location)

    return helper.dict_to_camel_case(session)
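search_short_session forces a desktop platform filter whenever the caller did not specify one, so a click map is always computed against a single platform. The guard reduced to its essence, with plain dicts standing in for the pydantic schemas (illustration only):

# Default-platform guard:
def ensure_platform(filters: list) -> list:
    if not any(f.get("type") == "platform" for f in filters):
        filters.append({"type": "platform", "value": ["desktop"], "operator": "is"})
    return filters

assert ensure_platform([])[0]["value"] == ["desktop"]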

@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
+from chalicelib.core.collaboration_base import BaseCollaboration

 logger = logging.getLogger(__name__)


@@ -15,34 +15,28 @@ class MSTeams(BaseCollaboration):
     @classmethod
     def add(cls, tenant_id, data: schemas.AddCollaborationSchema):
         if webhook.exists_by_name(tenant_id=tenant_id, name=data.name, exclude_id=None,
-                                  webhook_type=schemas.WebhookType.MSTEAMS):
+                                  webhook_type=schemas.WebhookType.msteams):
             raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
         if cls.say_hello(data.url):
             return webhook.add(tenant_id=tenant_id,
                                endpoint=data.url.unicode_string(),
-                               webhook_type=schemas.WebhookType.MSTEAMS,
+                               webhook_type=schemas.WebhookType.msteams,
                                name=data.name)
         return None

     @classmethod
     def say_hello(cls, url):
-        try:
-            r = requests.post(
-                url=url,
-                json={
-                    "@type": "MessageCard",
-                    "@context": "https://schema.org/extensions",
-                    "summary": "Welcome to OpenReplay",
-                    "title": "Welcome to OpenReplay"
-                },
-                timeout=3)
-            if r.status_code != 200:
-                logger.warning("MSTeams integration failed")
-                logger.warning(r.text)
-                return False
-        except Exception as e:
-            logger.warning("!!! MSTeams integration failed")
-            logger.exception(e)
+        r = requests.post(
+            url=url,
+            json={
+                "@type": "MessageCard",
+                "@context": "https://schema.org/extensions",
+                "summary": "Welcome to OpenReplay",
+                "title": "Welcome to OpenReplay"
+            })
+        if r.status_code != 200:
+            logging.warning("MSTeams integration failed")
+            logging.warning(r.text)
             return False
         return True


@@ -57,15 +51,15 @@ class MSTeams(BaseCollaboration):
                 json=body,
                 timeout=5)
             if r.status_code != 200:
-                logger.warning(f"!! issue sending msteams raw; webhookId:{webhook_id} code:{r.status_code}")
-                logger.warning(r.text)
+                logging.warning(f"!! issue sending msteams raw; webhookId:{webhook_id} code:{r.status_code}")
+                logging.warning(r.text)
                 return None
         except requests.exceptions.Timeout:
-            logger.warning(f"!! Timeout sending msteams raw webhookId:{webhook_id}")
+            logging.warning(f"!! Timeout sending msteams raw webhookId:{webhook_id}")
             return None
         except Exception as e:
-            logger.warning(f"!! Issue sending msteams raw webhookId:{webhook_id}")
-            logger.warning(e)
+            logging.warning(f"!! Issue sending msteams raw webhookId:{webhook_id}")
+            logging.warning(e)
             return None
         return {"data": r.text}


@@ -74,7 +68,7 @@ class MSTeams(BaseCollaboration):
         integration = cls.get_integration(tenant_id=tenant_id, integration_id=webhook_id)
         if integration is None:
             return {"errors": ["msteams integration not found"]}
-        logger.debug(f"====> sending msteams batch notification: {len(attachments)}")
+        logging.debug(f"====> sending msteams batch notification: {len(attachments)}")
         for i in range(0, len(attachments), 50):
             part = attachments[i:i + 50]
             for j in range(1, len(part), 2):


@@ -88,8 +82,8 @@ class MSTeams(BaseCollaboration):
                     "sections": part
                 })
             if r.status_code != 200:
-                logger.warning("!!!! something went wrong")
-                logger.warning(r.text)
+                logging.warning("!!!! something went wrong")
+                logging.warning(r.text)

     @classmethod
     def __share(cls, tenant_id, integration_id, attachement, extra=None):


@@ -163,9 +157,9 @@ class MSTeams(BaseCollaboration):
     def get_integration(cls, tenant_id, integration_id=None):
         if integration_id is not None:
             return webhook.get_webhook(tenant_id=tenant_id, webhook_id=integration_id,
-                                       webhook_type=schemas.WebhookType.MSTEAMS)
+                                       webhook_type=schemas.WebhookType.msteams)

-        integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.MSTEAMS)
+        integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.msteams)
         if integrations is None or len(integrations) == 0:
             return None
         return integrations[0]


@@ -6,19 +6,19 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
+from chalicelib.core.collaboration_base import BaseCollaboration


 class Slack(BaseCollaboration):
     @classmethod
     def add(cls, tenant_id, data: schemas.AddCollaborationSchema):
         if webhook.exists_by_name(tenant_id=tenant_id, name=data.name, exclude_id=None,
-                                  webhook_type=schemas.WebhookType.SLACK):
+                                  webhook_type=schemas.WebhookType.slack):
             raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
         if cls.say_hello(data.url):
             return webhook.add(tenant_id=tenant_id,
                                endpoint=data.url.unicode_string(),
-                               webhook_type=schemas.WebhookType.SLACK,
+                               webhook_type=schemas.WebhookType.slack,
                                name=data.name)
         return None


@@ -118,9 +118,9 @@ class Slack(BaseCollaboration):
     def get_integration(cls, tenant_id, integration_id=None):
         if integration_id is not None:
             return webhook.get_webhook(tenant_id=tenant_id, webhook_id=integration_id,
-                                       webhook_type=schemas.WebhookType.SLACK)
+                                       webhook_type=schemas.WebhookType.slack)

-        integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.SLACK)
+        integrations = webhook.get_by_type(tenant_id=tenant_id, webhook_type=schemas.WebhookType.slack)
         if integrations is None or len(integrations) == 0:
             return None
         return integrations[0]


@@ -1 +0,0 @@
-from . import collaboration_base as _


688 api/chalicelib/core/custom_metrics.py Normal file
@@ -0,0 +1,688 @@
import json
import logging

from decouple import config
from fastapi import HTTPException, status

import schemas
from chalicelib.core import sessions, funnels, errors, issues, click_maps, sessions_mobs, product_analytics, \
    custom_metrics_predefined
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient

logger = logging.getLogger(__name__)
PIE_CHART_GROUP = 5


# TODO: refactor this to split
#       timeseries /
#       table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
#       remove "table of" calls from this function
def __try_live(project_id, data: schemas.CardSchema):
    results = []
    for i, s in enumerate(data.series):
        results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
                                               view_type=data.view_type, metric_type=data.metric_type,
                                               metric_of=data.metric_of, metric_value=data.metric_value))
        if data.view_type == schemas.MetricTimeseriesViewType.progress:
            r = {"count": results[-1]}
            diff = s.filter.endTimestamp - s.filter.startTimestamp
            s.filter.endTimestamp = s.filter.startTimestamp
            s.filter.startTimestamp = s.filter.endTimestamp - diff
            r["previousCount"] = sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
                                                         view_type=data.view_type, metric_type=data.metric_type,
                                                         metric_of=data.metric_of, metric_value=data.metric_value)
            r["countProgress"] = helper.__progress(old_val=r["previousCount"], new_val=r["count"])
            r["seriesName"] = s.name if s.name else i + 1
            r["seriesId"] = s.series_id if s.series_id else None
            results[-1] = r
        elif data.view_type == schemas.MetricTableViewType.pie_chart:
            if len(results[i].get("values", [])) > PIE_CHART_GROUP:
                results[i]["values"] = results[i]["values"][:PIE_CHART_GROUP] \
                                       + [{
                    "name": "Others", "group": True,
                    "sessionCount": sum(r["sessionCount"] for r in results[i]["values"][PIE_CHART_GROUP:])
                }]

    return results
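For the progress view, the previous-period count comes from a window of the same length placed immediately before the current one: the filter's end becomes the old start, and the start moves back by the window length. Worked example of the shift with millisecond timestamps (illustration):

# Previous-period window used by the progress view:
start, end = 1_700_000_000_000, 1_700_600_000_000
diff = end - start                  # 600_000_000 ms
end, start = start, start - diff    # slide the window back by its own length
assert (start, end) == (1_699_400_000_000, 1_700_000_000_000)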

def __get_table_of_series(project_id, data: schemas.CardSchema):
    results = []
    for i, s in enumerate(data.series):
        results.append(sessions.search2_table(data=s.filter, project_id=project_id, density=data.density,
                                              metric_of=data.metric_of, metric_value=data.metric_value))

    return results


def __get_funnel_chart(project_id: int, data: schemas.CardFunnel, user_id: int = None):
    if len(data.series) == 0:
        return {
            "stages": [],
            "totalDropDueToIssues": 0
        }
    return funnels.get_top_insights_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)


def __get_errors_list(project_id, user_id, data: schemas.CardSchema):
    if len(data.series) == 0:
        return {
            "total": 0,
            "errors": []
        }
    return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)


def __get_sessions_list(project_id, user_id, data: schemas.CardSchema):
    if len(data.series) == 0:
        logger.debug("empty series")
        return {
            "total": 0,
            "sessions": []
        }
    return sessions.search_sessions(data=data.series[0].filter, project_id=project_id, user_id=user_id)


def __get_click_map_chart(project_id, user_id, data: schemas.CardClickMap, include_mobs: bool = True):
    if len(data.series) == 0:
        return None
    return click_maps.search_short_session(project_id=project_id, user_id=user_id,
                                           data=schemas.ClickMapSessionsSearch(
                                               **data.series[0].filter.model_dump()),
                                           include_mobs=include_mobs)


def __get_path_analysis_chart(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
    if len(data.series) == 0:
        data.series.append(
            schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
    elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
        data.series[0].filter = schemas.PathAnalysisSchema()

    return product_analytics.path_analysis(project_id=project_id, data=data)


def __get_timeseries_chart(project_id: int, data: schemas.CardTimeSeries, user_id: int = None):
    series_charts = __try_live(project_id=project_id, data=data)
    if data.view_type == schemas.MetricTimeseriesViewType.progress:
        return series_charts
    results = [{}] * len(series_charts[0])
    for i in range(len(results)):
        for j, series_chart in enumerate(series_charts):
            results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
                          data.series[j].name if data.series[j].name else j + 1: series_chart[i]["count"]}
    return results
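The merge step builds one row per timestamp, keyed by series name. Initialising with [{}] * n is safe here only because the loop rebinds results[i] to a fresh dict instead of mutating the shared one. The same merge in isolation (illustration):

# Merging two per-series charts into rows keyed by timestamp:
series_a = [{"timestamp": 1, "count": 4}, {"timestamp": 2, "count": 6}]
series_b = [{"timestamp": 1, "count": 1}, {"timestamp": 2, "count": 3}]
rows = [{}] * len(series_a)  # shared dict is fine: the loop rebinds, never mutates
for i in range(len(rows)):
    for name, chart in (("errors", series_a), ("crashes", series_b)):
        rows[i] = {**rows[i], "timestamp": chart[i]["timestamp"], name: chart[i]["count"]}
assert rows == [{"timestamp": 1, "errors": 4, "crashes": 1},
                {"timestamp": 2, "errors": 6, "crashes": 3}]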

def not_supported(**args):
    raise Exception("not supported")


def __get_table_of_user_ids(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_of_sessions(project_id: int, data: schemas.CardTable, user_id):
    return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)


def __get_table_of_errors(project_id: int, data: schemas.CardTable, user_id: int):
    return __get_errors_list(project_id=project_id, user_id=user_id, data=data)


def __get_table_of_issues(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_of_browsers(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_of_devises(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_of_countries(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_of_urls(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)


def __get_table_chart(project_id: int, data: schemas.CardTable, user_id: int):
    supported = {
        schemas.MetricOfTable.sessions: __get_table_of_sessions,
        schemas.MetricOfTable.errors: __get_table_of_errors,
        schemas.MetricOfTable.user_id: __get_table_of_user_ids,
        schemas.MetricOfTable.issues: __get_table_of_issues,
        schemas.MetricOfTable.user_browser: __get_table_of_browsers,
        schemas.MetricOfTable.user_device: __get_table_of_devises,
        schemas.MetricOfTable.user_country: __get_table_of_countries,
        schemas.MetricOfTable.visited_url: __get_table_of_urls,
    }
    return supported.get(data.metric_of, not_supported)(project_id=project_id, data=data, user_id=user_id)


def get_chart(project_id: int, data: schemas.CardSchema, user_id: int):
    if data.is_predefined:
        return custom_metrics_predefined.get_metric(key=data.metric_of,
                                                    project_id=project_id,
                                                    data=data.model_dump())

    supported = {
        schemas.MetricType.timeseries: __get_timeseries_chart,
        schemas.MetricType.table: __get_table_chart,
        schemas.MetricType.click_map: __get_click_map_chart,
        schemas.MetricType.funnel: __get_funnel_chart,
        schemas.MetricType.insights: not_supported,
        schemas.MetricType.pathAnalysis: __get_path_analysis_chart
    }
    return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)


def __merge_metric_with_data(metric: schemas.CardSchema,
                             data: schemas.CardSessionsSchema) -> schemas.CardSchema:
    metric.startTimestamp = data.startTimestamp
    metric.endTimestamp = data.endTimestamp
    metric.page = data.page
    metric.limit = data.limit
    metric.density = data.density
    if data.series is not None and len(data.series) > 0:
        metric.series = data.series

    if len(data.filters) > 0:
        for s in metric.series:
            s.filter.filters += data.filters
    metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
    return metric


def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if card is None:
        return None
    metric: schemas.CardSchema = schemas.CardSchema(**card)
    metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    results = []
    for s in metric.series:
        results.append({"seriesId": s.series_id, "seriesName": s.name,
                        **sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})

    return results


def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if raw_metric is None:
        return None
    metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
    metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    if metric is None:
        return None
    for s in metric.series:
        return {"seriesId": s.series_id, "seriesName": s.name,
                **funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}


def get_errors_list(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if raw_metric is None:
        return None
    metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
    metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    if metric is None:
        return None
    for s in metric.series:
        return {"seriesId": s.series_id, "seriesName": s.name,
                **errors.search(data=s.filter, project_id=project_id, user_id=user_id)}


def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
    results = []
    if len(data.series) == 0:
        return results
    for s in data.series:
        if len(data.filters) > 0:
            s.filter.filters += data.filters
        s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))

        results.append({"seriesId": None, "seriesName": s.name,
                        **sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})

    return results


def __get_funnel_issues(project_id: int, user_id: int, data: schemas.CardFunnel):
    if len(data.series) == 0:
        return {"data": []}
    data.series[0].filter.startTimestamp = data.startTimestamp
    data.series[0].filter.endTimestamp = data.endTimestamp
    data = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
    return {"data": data}


def __get_path_analysis_issues(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
    if len(data.filters) > 0 or len(data.series) > 0:
        filters = [f.model_dump(by_alias=True) for f in data.filters] \
                  + [f.model_dump(by_alias=True) for f in data.series[0].filter.filters]
    else:
        return []

    search_data = schemas.SessionsSearchPayloadSchema(
        startTimestamp=data.startTimestamp,
        endTimestamp=data.endTimestamp,
        limit=data.limit,
        page=data.page,
        filters=filters
    )

    for s in data.excludes:
        search_data.filters.append(schemas.SessionSearchEventSchema2(type=s.type,
                                                                     operator=schemas.SearchEventOperator._not_on,
                                                                     value=s.value))
    result = sessions.search_table_of_individual_issues(project_id=project_id, data=search_data)
    return result


def get_issues(project_id: int, user_id: int, data: schemas.CardSchema):
    if data.is_predefined:
        return not_supported()
    if data.metric_of == schemas.MetricOfTable.issues:
        return __get_table_of_issues(project_id=project_id, user_id=user_id, data=data)
    supported = {
        schemas.MetricType.timeseries: not_supported,
        schemas.MetricType.table: not_supported,
        schemas.MetricType.click_map: not_supported,
        schemas.MetricType.funnel: __get_funnel_issues,
        schemas.MetricType.insights: not_supported,
        schemas.MetricType.pathAnalysis: __get_path_analysis_issues,
    }
    return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)


def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
    r = {"start_point": [s.model_dump() for s in data.start_point],
         "start_type": data.start_type,
         "excludes": [e.model_dump() for e in data.excludes],
         "hideExcess": data.hide_excess}
    return r


def create_card(project_id, user_id, data: schemas.CardSchema, dashboard=False):
    with pg_client.PostgresClient() as cur:
        session_data = None
        if data.metric_type == schemas.MetricType.click_map:
            session_data = __get_click_map_chart(project_id=project_id, user_id=user_id,
                                                 data=data, include_mobs=False)
            if session_data is not None:
                session_data = json.dumps(session_data)
        _data = {"session_data": session_data}
        for i, s in enumerate(data.series):
            for k in s.model_dump().keys():
                _data[f"{k}_{i}"] = s.__getattribute__(k)
            _data[f"index_{i}"] = i
            _data[f"filter_{i}"] = s.filter.json()
        series_len = len(data.series)
        params = {"user_id": user_id, "project_id": project_id, **data.model_dump(), **_data}
        params["default_config"] = json.dumps(data.default_config.model_dump())
        params["card_info"] = None
        if data.metric_type == schemas.MetricType.pathAnalysis:
            params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))

        query = """INSERT INTO metrics (project_id, user_id, name, is_public,
                                        view_type, metric_type, metric_of, metric_value,
                                        metric_format, default_config, thumbnail, data,
                                        card_info)
                   VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
                           %(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
                           %(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s,
                           %(card_info)s)
                   RETURNING metric_id"""
        if len(data.series) > 0:
            query = f"""WITH m AS ({query})
                        INSERT INTO metric_series(metric_id, index, name, filter)
                        VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
                                          for i in range(series_len)])}
                        RETURNING metric_id;"""

        query = cur.mogrify(query, params)
        cur.execute(query)
        r = cur.fetchone()
        if dashboard:
            return r["metric_id"]
        return {"data": get_card(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
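The card and its series are inserted in a single round trip: the metrics INSERT is wrapped in a CTE named m, and every series row references the freshly generated metric_id via (SELECT metric_id FROM m). How the per-series VALUES clause is assembled for two series (illustration):

# Per-series VALUES clause for series_len == 2:
series_len = 2
values_sql = ",".join(f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
                      for i in range(series_len))
assert values_sql == ("((SELECT metric_id FROM m), %(index_0)s, %(name_0)s, %(filter_0)s::jsonb),"
                      "((SELECT metric_id FROM m), %(index_1)s, %(name_1)s, %(filter_1)s::jsonb)")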

def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
    metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if metric is None:
        return None
    series_ids = [r["seriesId"] for r in metric["series"]]
    n_series = []
    d_series_ids = []
    u_series = []
    u_series_ids = []
    params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
              "user_id": user_id, "project_id": project_id, "view_type": data.view_type,
              "metric_type": data.metric_type, "metric_of": data.metric_of,
              "metric_value": data.metric_value, "metric_format": data.metric_format,
              "config": json.dumps(data.default_config.model_dump()), "thumbnail": data.thumbnail}
    for i, s in enumerate(data.series):
        prefix = "u_"
        if s.index is None:
            s.index = i
        if s.series_id is None or s.series_id not in series_ids:
            n_series.append({"i": i, "s": s})
            prefix = "n_"
        else:
            u_series.append({"i": i, "s": s})
            u_series_ids.append(s.series_id)
        ns = s.model_dump()
        for k in ns.keys():
            if k == "filter":
                ns[k] = json.dumps(ns[k])
            params[f"{prefix}{k}_{i}"] = ns[k]
    for i in series_ids:
        if i not in u_series_ids:
            d_series_ids.append(i)
    params["d_series_ids"] = tuple(d_series_ids)
    params["card_info"] = None
    if data.metric_type == schemas.MetricType.pathAnalysis:
        params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))

    with pg_client.PostgresClient() as cur:
        sub_queries = []
        if len(n_series) > 0:
            sub_queries.append(f"""\
                n AS (INSERT INTO metric_series (metric_id, index, name, filter)
                      VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
                                        for s in n_series])}
                      RETURNING 1)""")
        if len(u_series) > 0:
            sub_queries.append(f"""\
                u AS (UPDATE metric_series
                      SET name=series.name,
                          filter=series.filter,
                          index=series.index
                      FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
                                              for s in u_series])}) AS series(series_id, index, name, filter)
                      WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
                      RETURNING 1)""")
        if len(d_series_ids) > 0:
            sub_queries.append("""\
                d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
                      RETURNING 1)""")
        query = cur.mogrify(f"""\
            {"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
            UPDATE metrics
            SET name = %(name)s, is_public= %(is_public)s,
                view_type= %(view_type)s, metric_type= %(metric_type)s,
                metric_of= %(metric_of)s, metric_value= %(metric_value)s,
                metric_format= %(metric_format)s,
                edited_at = timezone('utc'::text, now()),
                default_config = %(config)s,
                thumbnail = %(thumbnail)s,
                card_info = %(card_info)s
            WHERE metric_id = %(metric_id)s
              AND project_id = %(project_id)s
              AND (user_id = %(user_id)s OR is_public)
            RETURNING metric_id;""", params)
        cur.execute(query)
    return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)


def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
    constraints = ["metrics.project_id = %(project_id)s",
                   "metrics.deleted_at ISNULL"]
    params = {"project_id": project_id, "user_id": user_id,
              "offset": (data.page - 1) * data.limit,
              "limit": data.limit, }
    if data.mine_only:
        constraints.append("user_id = %(user_id)s")
    else:
        constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
    if data.shared_only:
        constraints.append("is_public")

    if data.query is not None and len(data.query) > 0:
        constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
        params["query"] = helper.values_for_operator(value=data.query,
                                                     op=schemas.SearchEventOperator._contains)
    with pg_client.PostgresClient() as cur:
        sub_join = ""
        if include_series:
            sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
                                             FROM metric_series
                                             WHERE metric_series.metric_id = metrics.metric_id
                                               AND metric_series.deleted_at ISNULL
                          ) AS metric_series ON (TRUE)"""
        query = cur.mogrify(
            f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
                       metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
                       dashboards, owner_email, default_config AS config, thumbnail
                FROM metrics
                         {sub_join}
                         LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
                                            FROM (SELECT DISTINCT dashboard_id, name, is_public
                                                  FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
                                                  WHERE deleted_at ISNULL
                                                    AND dashboard_widgets.metric_id = metrics.metric_id
                                                    AND project_id = %(project_id)s
                                                    AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
                         ) AS connected_dashboards ON (TRUE)
                         LEFT JOIN LATERAL (SELECT email AS owner_email
                                            FROM users
                                            WHERE deleted_at ISNULL
                                              AND users.user_id = metrics.user_id
                         ) AS owner ON (TRUE)
                WHERE {" AND ".join(constraints)}
                ORDER BY created_at {data.order.value}
                LIMIT %(limit)s OFFSET %(offset)s;""", params)
        cur.execute(query)
        rows = cur.fetchall()
        if include_series:
            for r in rows:
                for s in r["series"]:
                    s["filter"] = helper.old_search_payload_to_flat(s["filter"])
        else:
            for r in rows:
                r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
                r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
        rows = helper.list_to_camel_case(rows)
    return rows


def get_all(project_id, user_id):
    default_search = schemas.SearchCardsSchema()
    result = rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
    while len(rows) == default_search.limit:
        default_search.page += 1
        rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
        result += rows

    return result
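get_all pages through search_all until a short page signals the end: any page shorter than the limit must be the last one (when the total is an exact multiple of the limit, this costs one extra empty query). The pattern in isolation (illustration):

# Page-until-short-page pattern:
def fetch_everything(fetch_page, limit=100):
    page, out = 1, []
    while True:
        rows = fetch_page(page=page, limit=limit)
        out += rows
        if len(rows) < limit:
            return out
        page += 1

data = list(range(250))
fake = lambda page, limit: data[(page - 1) * limit: page * limit]
assert len(fetch_everything(fake)) == 250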

def delete_card(project_id, metric_id, user_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""\
                UPDATE public.metrics
                SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
                WHERE project_id = %(project_id)s
                  AND metric_id = %(metric_id)s
                  AND (user_id = %(user_id)s OR is_public);""",
                        {"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
        )

    return {"state": "success"}


def __get_path_analysis_attributes(row):
    card_info = row.pop("cardInfo")
    row["excludes"] = card_info.get("excludes", [])
    row["startPoint"] = card_info.get("startPoint", [])
    row["startType"] = card_info.get("startType", "start")
    row["hideExcess"] = card_info.get("hideExcess", False)
    return row


def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data: bool = False):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, deleted_at, edited_at, metric_type,
                       view_type, metric_of, metric_value, metric_format, is_pinned, default_config,
                       default_config AS config, series, dashboards, owner_email, card_info
                       {',data' if include_data else ''}
                FROM metrics
                         LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
                                            FROM metric_series
                                            WHERE metric_series.metric_id = metrics.metric_id
                                              AND metric_series.deleted_at ISNULL
                         ) AS metric_series ON (TRUE)
                         LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
                                            FROM (SELECT dashboard_id, name, is_public
                                                  FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
                                                  WHERE deleted_at ISNULL
                                                    AND project_id = %(project_id)s
                                                    AND ((dashboards.user_id = %(user_id)s OR is_public))
                                                    AND metric_id = %(metric_id)s) AS connected_dashboards
                         ) AS connected_dashboards ON (TRUE)
                         LEFT JOIN LATERAL (SELECT email AS owner_email
                                            FROM users
                                            WHERE deleted_at ISNULL
                                              AND users.user_id = metrics.user_id
                         ) AS owner ON (TRUE)
                WHERE metrics.project_id = %(project_id)s
                  AND metrics.deleted_at ISNULL
                  AND (metrics.user_id = %(user_id)s OR metrics.is_public)
                  AND metrics.metric_id = %(metric_id)s
                ORDER BY created_at;""",
            {"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
        )
        cur.execute(query)
        row = cur.fetchone()
        if row is None:
            return None
        row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
        row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
        if flatten:
            for s in row["series"]:
                s["filter"] = helper.old_search_payload_to_flat(s["filter"])
        row = helper.dict_to_camel_case(row)
        if row["metricType"] == schemas.MetricType.pathAnalysis:
            row = __get_path_analysis_attributes(row=row)
    return row


def get_series_for_alert(project_id, user_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT series_id AS value,
                          metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
                          'count' AS unit,
                          FALSE AS predefined,
                          metric_id,
                          series_id
                   FROM metric_series
                            INNER JOIN metrics USING (metric_id)
                   WHERE metrics.deleted_at ISNULL
                     AND metrics.project_id = %(project_id)s
                     AND metrics.metric_type = 'timeseries'
                     AND (user_id = %(user_id)s OR is_public)
                   ORDER BY name;""",
                {"project_id": project_id, "user_id": user_id}
            )
        )
        rows = cur.fetchall()
    return helper.list_to_camel_case(rows)


def change_state(project_id, metric_id, user_id, status):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""\
                UPDATE public.metrics
                SET active = %(status)s
                WHERE metric_id = %(metric_id)s
                  AND (user_id = %(user_id)s OR is_public);""",
                        {"metric_id": metric_id, "status": status, "user_id": user_id})
        )
    return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)


def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
                                 data: schemas.CardSessionsSchema
                                 # , range_value=None, start_date=None, end_date=None
                                 ):
    card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if card is None:
        return None
    metric: schemas.CardSchema = schemas.CardSchema(**card)
    metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    if metric is None:
        return None
    for s in metric.series:
        s.filter.startTimestamp = data.startTimestamp
        s.filter.endTimestamp = data.endTimestamp
        s.filter.limit = data.limit
        s.filter.page = data.page
        issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
        issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
        issue = None
        for i in issues_list:
            if i.get("issueId", "") == issue_id:
                issue = i
                break
        if issue is None:
            issue = issues.get(project_id=project_id, issue_id=issue_id)
            if issue is not None:
                issue = {**issue,
                         "affectedSessions": 0,
                         "affectedUsers": 0,
                         "conversionImpact": 0,
                         "lostConversions": 0,
                         "unaffectedSessions": 0}
        return {"seriesId": s.series_id, "seriesName": s.name,
                "sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
                                                     issue=issue, data=s.filter)
                if issue is not None else {"total": 0, "sessions": []},
                "issue": issue}


def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, include_data=True)

    if raw_metric is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
    raw_metric["startTimestamp"] = data.startTimestamp
    raw_metric["endTimestamp"] = data.endTimestamp
    raw_metric["limit"] = data.limit
    raw_metric["density"] = data.density
    metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)

    if metric.is_predefined:
        return custom_metrics_predefined.get_metric(key=metric.metric_of,
                                                    project_id=project_id,
                                                    data=data.model_dump())
    elif metric.metric_type == schemas.MetricType.click_map:
        if raw_metric["data"]:
            keys = sessions_mobs. \
                __get_mob_keys(project_id=project_id, session_id=raw_metric["data"]["sessionId"])
            mob_exists = False
            for k in keys:
                if StorageClient.exists(bucket=config("sessions_bucket"), key=k):
                    mob_exists = True
                    break
            if mob_exists:
                raw_metric["data"]['domURL'] = sessions_mobs.get_urls(session_id=raw_metric["data"]["sessionId"],
                                                                      project_id=project_id)
                raw_metric["data"]['mobsUrl'] = sessions_mobs.get_urls_depercated(
                    session_id=raw_metric["data"]["sessionId"])
                return raw_metric["data"]

    return get_chart(project_id=project_id, data=metric, user_id=user_id)

61 api/chalicelib/core/custom_metrics_predefined.py Normal file
@@ -0,0 +1,61 @@
import logging
from typing import Union

import schemas
from chalicelib.core import metrics

logger = logging.getLogger(__name__)


def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors, \
        schemas.MetricOfPerformance, schemas.MetricOfResources], project_id: int, data: dict):
    supported = {schemas.MetricOfWebVitals.count_sessions: metrics.get_processed_sessions,
                 schemas.MetricOfWebVitals.avg_image_load_time: metrics.get_application_activity_avg_image_load_time,
                 schemas.MetricOfWebVitals.avg_page_load_time: metrics.get_application_activity_avg_page_load_time,
                 schemas.MetricOfWebVitals.avg_request_load_time: metrics.get_application_activity_avg_request_load_time,
                 schemas.MetricOfWebVitals.avg_dom_content_load_start: metrics.get_page_metrics_avg_dom_content_load_start,
                 schemas.MetricOfWebVitals.avg_first_contentful_pixel: metrics.get_page_metrics_avg_first_contentful_pixel,
                 schemas.MetricOfWebVitals.avg_visited_pages: metrics.get_user_activity_avg_visited_pages,
                 schemas.MetricOfWebVitals.avg_session_duration: metrics.get_user_activity_avg_session_duration,
                 schemas.MetricOfWebVitals.avg_pages_dom_buildtime: metrics.get_pages_dom_build_time,
                 schemas.MetricOfWebVitals.avg_pages_response_time: metrics.get_pages_response_time,
                 schemas.MetricOfWebVitals.avg_response_time: metrics.get_top_metrics_avg_response_time,
                 schemas.MetricOfWebVitals.avg_first_paint: metrics.get_top_metrics_avg_first_paint,
                 schemas.MetricOfWebVitals.avg_dom_content_loaded: metrics.get_top_metrics_avg_dom_content_loaded,
                 schemas.MetricOfWebVitals.avg_till_first_byte: metrics.get_top_metrics_avg_till_first_bit,
                 schemas.MetricOfWebVitals.avg_time_to_interactive: metrics.get_top_metrics_avg_time_to_interactive,
                 schemas.MetricOfWebVitals.count_requests: metrics.get_top_metrics_count_requests,
                 schemas.MetricOfWebVitals.avg_time_to_render: metrics.get_time_to_render,
                 schemas.MetricOfWebVitals.avg_used_js_heap_size: metrics.get_memory_consumption,
                 schemas.MetricOfWebVitals.avg_cpu: metrics.get_avg_cpu,
                 schemas.MetricOfWebVitals.avg_fps: metrics.get_avg_fps,
                 schemas.MetricOfErrors.impacted_sessions_by_js_errors: metrics.get_impacted_sessions_by_js_errors,
                 schemas.MetricOfErrors.domains_errors_4xx: metrics.get_domains_errors_4xx,
                 schemas.MetricOfErrors.domains_errors_5xx: metrics.get_domains_errors_5xx,
                 schemas.MetricOfErrors.errors_per_domains: metrics.get_errors_per_domains,
                 schemas.MetricOfErrors.calls_errors: metrics.get_calls_errors,
                 schemas.MetricOfErrors.errors_per_type: metrics.get_errors_per_type,
                 schemas.MetricOfErrors.resources_by_party: metrics.get_resources_by_party,
                 schemas.MetricOfPerformance.speed_location: metrics.get_speed_index_location,
                 schemas.MetricOfPerformance.slowest_domains: metrics.get_slowest_domains,
                 schemas.MetricOfPerformance.sessions_per_browser: metrics.get_sessions_per_browser,
                 schemas.MetricOfPerformance.time_to_render: metrics.get_time_to_render,
                 schemas.MetricOfPerformance.impacted_sessions_by_slow_pages: metrics.get_impacted_sessions_by_slow_pages,
                 schemas.MetricOfPerformance.memory_consumption: metrics.get_memory_consumption,
                 schemas.MetricOfPerformance.cpu: metrics.get_avg_cpu,
                 schemas.MetricOfPerformance.fps: metrics.get_avg_fps,
                 schemas.MetricOfPerformance.crashes: metrics.get_crashes,
                 schemas.MetricOfPerformance.resources_vs_visually_complete: metrics.get_resources_vs_visually_complete,
                 schemas.MetricOfPerformance.pages_dom_buildtime: metrics.get_pages_dom_build_time,
                 schemas.MetricOfPerformance.pages_response_time: metrics.get_pages_response_time,
                 schemas.MetricOfPerformance.pages_response_time_distribution: metrics.get_pages_response_time_distribution,
                 schemas.MetricOfResources.missing_resources: metrics.get_missing_resources_trend,
                 schemas.MetricOfResources.slowest_resources: metrics.get_slowest_resources,
                 schemas.MetricOfResources.resources_loading_time: metrics.get_resources_loading_time,
                 schemas.MetricOfResources.resource_type_vs_response_end: metrics.resource_type_vs_response_end,
                 schemas.MetricOfResources.resources_count_by_type: metrics.get_resources_count_by_type, }

    return supported.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)
|
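The new module is a plain dispatch table: each predefined card key resolves to one of the `metrics` helpers. A minimal usage sketch, with illustrative key and payload values that are not taken from this diff:

```python
# Hypothetical call through the dispatcher above; assumes the mapped helper
# accepts (project_id=..., **data), as the return statement implies.
import schemas
from chalicelib.core import custom_metrics_predefined

chart = custom_metrics_predefined.get_metric(
    key=schemas.MetricOfWebVitals.count_sessions,
    project_id=1,  # illustrative project id
    data={"startTimestamp": 1700000000000, "endTimestamp": 1700086400000},
)
# Caveat visible in the code: an unsupported key falls back to
# `lambda *args: None`, which accepts no keyword arguments, so the keyword
# call would raise TypeError rather than quietly return None.
```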
@@ -1,7 +1,7 @@
 import json

 import schemas
-from chalicelib.core.metrics import custom_metrics
+from chalicelib.core import custom_metrics
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

@@ -34,13 +34,8 @@ def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):

 def get_dashboards(project_id, user_id):
     with pg_client.PostgresClient() as cur:
-        pg_query = f"""SELECT *, owner_email, owner_name
+        pg_query = f"""SELECT *
                        FROM dashboards
-                                LEFT JOIN LATERAL (SELECT email AS owner_email, name AS owner_name
-                                                   FROM users
-                                                   WHERE deleted_at ISNULL
-                                                     AND users.user_id = dashboards.user_id
-                                          ) AS owner ON (TRUE)
                        WHERE deleted_at ISNULL
                          AND project_id = %(projectId)s
                          AND (user_id = %(userId)s OR is_public);"""
@@ -149,6 +144,30 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
     return helper.dict_to_camel_case(row)


+def get_widget(project_id, user_id, dashboard_id, widget_id):
+    with pg_client.PostgresClient() as cur:
+        pg_query = """SELECT metrics.*, metric_series.series
+                      FROM dashboard_widgets
+                               INNER JOIN dashboards USING (dashboard_id)
+                               INNER JOIN metrics USING (metric_id)
+                               LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
+                                                  FROM metric_series
+                                                  WHERE metric_series.metric_id = metrics.metric_id
+                                                    AND metric_series.deleted_at ISNULL
+                                                  ) AS metric_series ON (TRUE)
+                      WHERE dashboard_id = %(dashboard_id)s
+                        AND widget_id = %(widget_id)s
+                        AND (dashboards.is_public OR dashboards.user_id = %(userId)s)
+                        AND dashboards.deleted_at IS NULL
+                        AND metrics.deleted_at ISNULL
+                        AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)
+                        AND (metrics.is_public OR metrics.user_id = %(userId)s);"""
+        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
+        cur.execute(cur.mogrify(pg_query, params))
+        row = cur.fetchone()
+        return helper.dict_to_camel_case(row)
+
+
 def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashboardPayloadSchema):
     with pg_client.PostgresClient() as cur:
         pg_query = """INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)

@@ -204,9 +223,9 @@ def pin_dashboard(project_id, user_id, dashboard_id):
     return helper.dict_to_camel_case(row)


-def create_metric_add_widget(project: schemas.ProjectContext, user_id, dashboard_id, data: schemas.CardSchema):
-    metric_id = custom_metrics.create_card(project=project, user_id=user_id, data=data, dashboard=True)
-    return add_widget(project_id=project.project_id, user_id=user_id, dashboard_id=dashboard_id,
+def create_metric_add_widget(project_id, user_id, dashboard_id, data: schemas.CardSchema):
+    metric_id = custom_metrics.create_card(project_id=project_id, user_id=user_id, data=data, dashboard=True)
+    return add_widget(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id,
                       data=schemas.AddWidgetToDashboardPayloadSchema(metricId=metric_id))

 # def make_chart_widget(dashboard_id, project_id, user_id, widget_id, data: schemas.CardChartSchema):
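The added `get_widget` leans on a `LEFT JOIN LATERAL ... jsonb_agg` so a widget whose metric has no series still returns a row with an empty JSON array. A standalone sketch of the same pattern, against hypothetical `parent`/`child` tables that are not part of the diff:

```python
# Minimal reproduction of the LATERAL-aggregation idiom used above.
lateral_sketch = """
SELECT parent.id, child_agg.items
FROM parent
         LEFT JOIN LATERAL (
    SELECT COALESCE(jsonb_agg(child.* ORDER BY child.index), '[]'::jsonb) AS items
    FROM child
    WHERE child.parent_id = parent.id
) AS child_agg ON (TRUE)
WHERE parent.id = %(id)s;
"""
# COALESCE(..., '[]'::jsonb) converts the NULL that jsonb_agg yields over an
# empty set into an empty array, so callers never see a missing key.
```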
@@ -1,8 +1,5 @@
-import logging
-
-from chalicelib.utils import pg_client
-
-logger = logging.getLogger(__name__)
+from chalicelib.utils import helper, pg_client


 class DatabaseRequestHandler:

@@ -93,7 +90,7 @@ class DatabaseRequestHandler:
         if additional_clauses:
             query += " " + additional_clauses

-        logger.debug(f"Query: {query}")
+        logging.info(f"Query: {query}")
         return query

     def execute_query(self, query, data=None):
711
api/chalicelib/core/errors.py
Normal file
@@ -0,0 +1,711 @@
import json

import schemas
from chalicelib.core import sourcemaps, sessions
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size


def get(error_id, family=False):
    if family:
        return get_batch([error_id])
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
            {"error_id": error_id})
        cur.execute(query=query)
        result = cur.fetchone()
        if result is not None:
            result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
        return helper.dict_to_camel_case(result)


def get_batch(error_ids):
    if len(error_ids) == 0:
        return []
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """
            WITH RECURSIVE error_family AS (
                SELECT *
                FROM public.errors
                WHERE error_id IN %(error_ids)s
                UNION
                SELECT child_errors.*
                FROM public.errors AS child_errors
                         INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
            )
            SELECT *
            FROM error_family;""",
            {"error_ids": tuple(error_ids)})
        cur.execute(query=query)
        errors = cur.fetchall()
        for e in errors:
            e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
        return helper.list_to_camel_case(errors)

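`get_batch` walks the whole error family in one query. A reduced sketch of how that recursive CTE expands, with the schema trimmed to the two linking columns (illustrative, outside the diff):

```python
# Minimal reproduction of the error_family recursion above, assuming only
# (error_id, parent_error_id) matter; not part of the diff itself.
family_sketch = """
WITH RECURSIVE error_family AS (
    SELECT * FROM public.errors WHERE error_id IN %(error_ids)s
    UNION
    SELECT child_errors.*
    FROM public.errors AS child_errors
             INNER JOIN error_family
                        ON error_family.error_id = child_errors.parent_error_id
                            OR error_family.parent_error_id = child_errors.error_id
)
SELECT * FROM error_family;
"""
# UNION (not UNION ALL) deduplicates rows, which is what lets the recursion
# terminate even though the join condition links both directions
# (parent -> child and child -> parent).
```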
def __flatten_sort_key_count_version(data, merge_nested=False):
    if data is None:
        return []
    return sorted(
        [
            {
                "name": f'{o["name"]}@{v["version"]}',
                "count": v["count"]
            } for o in data for v in o["partition"]
        ],
        key=lambda o: o["count"], reverse=True) if merge_nested else \
        [
            {
                "name": o["name"],
                "count": o["count"],
            } for o in data
        ]


def __process_tags(row):
    return [
        {"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
        {"name": "browser.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
        {"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
        {"name": "OS.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
        {"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
        {"name": "device",
         "partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
        {"name": "country", "partitions": row.pop("country_partition")}
    ]

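A quick sketch of what `__flatten_sort_key_count_version` produces in its two modes, using made-up partition data shaped like `browsers_partition`:

```python
# Illustrative input, not taken from the diff.
data = [
    {"name": "Chrome", "count": 10,
     "partition": [{"version": "120", "count": 7}, {"version": "119", "count": 3}]},
    {"name": "Firefox", "count": 4,
     "partition": [{"version": "121", "count": 4}]},
]
# merge_nested=False keeps the top level (SQL already sorted it):
#   [{"name": "Chrome", "count": 10}, {"name": "Firefox", "count": 4}]
# merge_nested=True flattens to name@version, re-sorted by count descending:
#   [{"name": "Chrome@120", "count": 7}, {"name": "Firefox@121", "count": 4},
#    {"name": "Chrome@119", "count": 3}]
```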
def get_details(project_id, error_id, user_id, **data):
    pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
    pg_sub_query24.append("error_id = %(error_id)s")
    pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
                                                     startTime_arg_name="startDate30",
                                                     endTime_arg_name="endDate30", project_key="sessions.project_id")
    pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_session.append("error_id = %(error_id)s")
    pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
                                                 endTime_arg_name="endDate30", project_key="errors.project_id")
    pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
    pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_err.append("error_id = %(error_id)s")
    pg_sub_query30_err.append("source ='js_exception'")
    pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
    pg_sub_query30.append("error_id = %(error_id)s")
    pg_basic_query = __get_basic_constraints(time_constraint=False)
    pg_basic_query.append("error_id = %(error_id)s")
    with pg_client.PostgresClient() as cur:
        data["startDate24"] = TimeUTC.now(-1)
        data["endDate24"] = TimeUTC.now()
        data["startDate30"] = TimeUTC.now(-30)
        data["endDate30"] = TimeUTC.now()
        density24 = int(data.get("density24", 24))
        step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
        density30 = int(data.get("density30", 30))
        step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
        params = {
            "startDate24": data['startDate24'],
            "endDate24": data['endDate24'],
            "startDate30": data['startDate30'],
            "endDate30": data['endDate30'],
            "project_id": project_id,
            "userId": user_id,
            "step_size24": step_size24,
            "step_size30": step_size30,
            "error_id": error_id}

        main_pg_query = f"""\
            SELECT error_id,
                   name,
                   message,
                   users,
                   sessions,
                   last_occurrence,
                   first_occurrence,
                   last_session_id,
                   browsers_partition,
                   os_partition,
                   device_partition,
                   country_partition,
                   chart24,
                   chart30,
                   custom_tags
            FROM (SELECT error_id,
                         name,
                         message,
                         COUNT(DISTINCT user_id)    AS users,
                         COUNT(DISTINCT session_id) AS sessions
                  FROM public.errors
                           INNER JOIN events.errors AS s_errors USING (error_id)
                           INNER JOIN public.sessions USING (session_id)
                  WHERE {" AND ".join(pg_sub_query30_err)}
                  GROUP BY error_id, name, message) AS details
                     INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
                                        MIN(timestamp) AS first_occurrence
                                 FROM events.errors
                                 WHERE error_id = %(error_id)s
                                 GROUP BY error_id) AS time_details ON (TRUE)
                     INNER JOIN (SELECT session_id                         AS last_session_id,
                                        coalesce(custom_tags, '[]')::jsonb AS custom_tags
                                 FROM events.errors
                                          LEFT JOIN LATERAL (
                                     SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
                                     FROM errors_tags
                                     WHERE errors_tags.error_id = %(error_id)s
                                       AND errors_tags.session_id = errors.session_id
                                       AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
                                 WHERE error_id = %(error_id)s
                                 ORDER BY errors.timestamp DESC
                                 LIMIT 1) AS last_session_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_browser      AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors
                                                      INNER JOIN sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_browser
                                             ORDER BY count DESC) AS count_per_browser_query
                                                INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
                                                                    FROM (SELECT user_browser_version AS version,
                                                                                 COUNT(session_id)    AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query30_session)}
                                                                            AND sessions.user_browser = count_per_browser_query.name
                                                                          GROUP BY user_browser_version
                                                                          ORDER BY count DESC) AS version_details
                                     ) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_os           AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_os
                                             ORDER BY count DESC) AS count_per_os_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
                                                                    FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query30_session)}
                                                                            AND sessions.user_os = count_per_os_details.name
                                                                          GROUP BY user_os_version
                                                                          ORDER BY count DESC) AS count_per_version_details
                                                                    GROUP BY count_per_os_details.name ) AS os_version_details
                                     ON (TRUE)) AS os_details) AS os_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_device_type  AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_device_type
                                             ORDER BY count DESC) AS count_per_device_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
                                                                    FROM (SELECT CASE
                                                                                     WHEN user_device = '' OR user_device ISNULL
                                                                                         THEN 'unknown'
                                                                                     ELSE user_device END AS version,
                                                                                 COUNT(session_id)        AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query30_session)}
                                                                            AND sessions.user_device_type = count_per_device_details.name
                                                                          GROUP BY user_device
                                                                          ORDER BY count DESC) AS count_per_device_v_details
                                                                    GROUP BY count_per_device_details.name ) AS device_version_details
                                     ON (TRUE)) AS device_details) AS device_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
                                 FROM (SELECT user_country      AS name,
                                              COUNT(session_id) AS count
                                       FROM events.errors INNER JOIN public.sessions USING (session_id)
                                       WHERE {" AND ".join(pg_sub_query30_session)}
                                       GROUP BY user_country
                                       ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
                                 FROM (SELECT generated_timestamp AS timestamp,
                                              COUNT(session_id)   AS count
                                       FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
                                                LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                                   FROM events.errors
                                                                            INNER JOIN public.sessions USING (session_id)
                                                                   WHERE {" AND ".join(pg_sub_query24)}
                                           ) AS chart_details ON (TRUE)
                                       GROUP BY generated_timestamp
                                       ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
                                 FROM (SELECT generated_timestamp AS timestamp,
                                              COUNT(session_id)   AS count
                                       FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
                                                LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                                   FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                   WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
                                                ON (TRUE)
                                       GROUP BY timestamp
                                       ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
        """

        # print("--------------------")
        # print(cur.mogrify(main_pg_query, params))
        # print("--------------------")
        cur.execute(cur.mogrify(main_pg_query, params))
        row = cur.fetchone()
        if row is None:
            return {"errors": ["error not found"]}
        row["tags"] = __process_tags(row)

        query = cur.mogrify(
            f"""SELECT error_id, status, session_id, start_ts,
                       parent_error_id,session_id, user_anonymous_id,
                       user_id, user_uuid, user_browser, user_browser_version,
                       user_os, user_os_version, user_device, payload,
                       FALSE AS favorite,
                       True AS viewed
                FROM public.errors AS pe
                         INNER JOIN events.errors AS ee USING (error_id)
                         INNER JOIN public.sessions USING (session_id)
                WHERE pe.project_id = %(project_id)s
                  AND error_id = %(error_id)s
                ORDER BY start_ts DESC
                LIMIT 1;""",
            {"project_id": project_id, "error_id": error_id, "user_id": user_id})
        cur.execute(query=query)
        status = cur.fetchone()

        if status is not None:
            row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
            row["status"] = status.pop("status")
            row["parent_error_id"] = status.pop("parent_error_id")
            row["favorite"] = status.pop("favorite")
            row["viewed"] = status.pop("viewed")
            row["last_hydrated_session"] = status
        else:
            row["stack"] = []
            row["last_hydrated_session"] = None
            row["status"] = "untracked"
            row["parent_error_id"] = None
            row["favorite"] = False
            row["viewed"] = False
        return {"data": helper.dict_to_camel_case(row)}

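Both chart joins use the same bucketing idiom: `generate_series` produces the bucket timestamps and a LATERAL join counts distinct sessions per bucket. A stripped-down sketch of the idiom, outside the diff:

```python
# step_size is in milliseconds; each generated_timestamp is a bucket start,
# and the chart=True constraints pin rows to
# [generated_timestamp, generated_timestamp + step_size).
chart_sketch = """
SELECT generated_timestamp AS timestamp,
       COUNT(session_id)   AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
         LEFT JOIN LATERAL (SELECT DISTINCT session_id
                            FROM events.errors
                            WHERE error_id = %(error_id)s
                              AND timestamp >= generated_timestamp
                              AND timestamp < generated_timestamp + %(step_size)s
    ) AS bucket ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;
"""
# LEFT JOIN (not INNER) keeps empty buckets in the series, so the chart has a
# point for every interval even when no error occurred in it.
```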
def get_details_chart(project_id, error_id, user_id, **data):
    pg_sub_query = __get_basic_constraints()
    pg_sub_query.append("error_id = %(error_id)s")
    pg_sub_query_chart = __get_basic_constraints(time_constraint=False, chart=True)
    pg_sub_query_chart.append("error_id = %(error_id)s")
    with pg_client.PostgresClient() as cur:
        if data.get("startDate") is None:
            data["startDate"] = TimeUTC.now(-7)
        else:
            data["startDate"] = int(data["startDate"])
        if data.get("endDate") is None:
            data["endDate"] = TimeUTC.now()
        else:
            data["endDate"] = int(data["endDate"])
        density = int(data.get("density", 7))
        step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
        params = {
            "startDate": data['startDate'],
            "endDate": data['endDate'],
            "project_id": project_id,
            "userId": user_id,
            "step_size": step_size,
            "error_id": error_id}

        main_pg_query = f"""\
            SELECT %(error_id)s AS error_id,
                   browsers_partition,
                   os_partition,
                   device_partition,
                   country_partition,
                   chart
            FROM (SELECT jsonb_agg(browser_details) AS browsers_partition
                  FROM (SELECT *
                        FROM (SELECT user_browser      AS name,
                                     COUNT(session_id) AS count
                              FROM events.errors INNER JOIN public.sessions USING (session_id)
                              WHERE {" AND ".join(pg_sub_query)}
                              GROUP BY user_browser
                              ORDER BY count DESC) AS count_per_browser_query
                                 INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
                                                     FROM (SELECT user_browser_version AS version,
                                                                  COUNT(session_id)    AS count
                                                           FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                           WHERE {" AND ".join(pg_sub_query)}
                                                             AND user_browser = count_per_browser_query.name
                                                           GROUP BY user_browser_version
                                                           ORDER BY count DESC) AS count_per_version_details) AS browesr_version_details
                            ON (TRUE)) AS browser_details) AS browser_details
                     INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_os           AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query)}
                                             GROUP BY user_os
                                             ORDER BY count DESC) AS count_per_os_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_query) AS partition
                                                                    FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
                                                                                 COUNT(session_id)                    AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query)}
                                                                            AND user_os = count_per_os_details.name
                                                                          GROUP BY user_os_version
                                                                          ORDER BY count DESC) AS count_per_version_query
                                     ) AS os_version_query ON (TRUE)) AS os_details) AS os_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_device_type  AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query)}
                                             GROUP BY user_device_type
                                             ORDER BY count DESC) AS count_per_device_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_details) AS partition
                                                                    FROM (SELECT CASE
                                                                                     WHEN user_device = '' OR user_device ISNULL
                                                                                         THEN 'unknown'
                                                                                     ELSE user_device END AS version,
                                                                                 COUNT(session_id)        AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query)}
                                                                            AND user_device_type = count_per_device_details.name
                                                                          GROUP BY user_device_type, user_device
                                                                          ORDER BY count DESC) AS count_per_device_details
                                     ) AS device_version_details ON (TRUE)) AS device_details) AS device_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
                                 FROM (SELECT user_country      AS name,
                                              COUNT(session_id) AS count
                                       FROM events.errors INNER JOIN public.sessions USING (session_id)
                                       WHERE {" AND ".join(pg_sub_query)}
                                       GROUP BY user_country
                                       ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(chart_details) AS chart
                                 FROM (SELECT generated_timestamp AS timestamp,
                                              COUNT(session_id)   AS count
                                       FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
                                                LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                                   FROM events.errors
                                                                            INNER JOIN public.sessions USING (session_id)
                                                                   WHERE {" AND ".join(pg_sub_query_chart)}
                                           ) AS chart_details ON (TRUE)
                                       GROUP BY generated_timestamp
                                       ORDER BY generated_timestamp) AS chart_details) AS chart_details ON (TRUE);"""

        cur.execute(cur.mogrify(main_pg_query, params))
        row = cur.fetchone()
        if row is None:
            return {"errors": ["error not found"]}
        row["tags"] = __process_tags(row)
        return {"data": helper.dict_to_camel_case(row)}

def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
                            endTime_arg_name="endDate", chart=False, step_size_name="step_size",
                            project_key="project_id"):
    if project_key is None:
        ch_sub_query = []
    else:
        ch_sub_query = [f"{project_key} =%(project_id)s"]
    if time_constraint:
        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
                         f"timestamp < %({endTime_arg_name})s"]
    if chart:
        ch_sub_query += [f"timestamp >= generated_timestamp",
                         f"timestamp < generated_timestamp + %({step_size_name})s"]
    if platform == schemas.PlatformType.mobile:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.desktop:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_sort_key(key):
    return {
        schemas.ErrorSort.occurrence: "max_datetime",
        schemas.ErrorSort.users_count: "users",
        schemas.ErrorSort.sessions_count: "sessions"
    }.get(key, 'max_datetime')

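For reference, a sketch of what the constraint builder returns for a charted call, following the branches above (values illustrative):

```python
# __get_basic_constraints(time_constraint=False, chart=True,
#                         step_size_name="step_size24")
# produces, per the branches above:
constraints = [
    "project_id =%(project_id)s",
    "timestamp >= generated_timestamp",
    "timestamp < generated_timestamp + %(step_size24)s",
]
# Callers then append predicates such as "error_id = %(error_id)s" and join
# the list with " AND " inside the f-string queries shown earlier.
```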
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
    empty_response = {
        'total': 0,
        'errors': []
    }

    platform = None
    for f in data.filters:
        if f.type == schemas.FilterType.platform and len(f.value) > 0:
            platform = f.value[0]
    pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
    pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
                     "pe.project_id=%(project_id)s"]
    # To ignore Script error
    pg_sub_query.append("pe.message!='Script error.'")
    pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
    # pg_sub_query_chart.append("source ='js_exception'")
    pg_sub_query_chart.append("errors.error_id =details.error_id")
    statuses = []
    error_ids = None
    if data.startTimestamp is None:
        data.startTimestamp = TimeUTC.now(-30)
    if data.endTimestamp is None:
        data.endTimestamp = TimeUTC.now(1)
    if len(data.events) > 0 or len(data.filters) > 0:
        print("-- searching for sessions before errors")
        statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
                                            error_status=data.status)
        if len(statuses) == 0:
            return empty_response
        error_ids = [e["errorId"] for e in statuses]
    with pg_client.PostgresClient() as cur:
        step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
        sort = __get_sort_key('datetime')
        if data.sort is not None:
            sort = __get_sort_key(data.sort)
        order = schemas.SortOrderType.desc
        if data.order is not None:
            order = data.order
        extra_join = ""

        params = {
            "startDate": data.startTimestamp,
            "endDate": data.endTimestamp,
            "project_id": project_id,
            "userId": user_id,
            "step_size": step_size}
        if data.status != schemas.ErrorStatus.all:
            pg_sub_query.append("status = %(error_status)s")
            params["error_status"] = data.status
        if data.limit is not None and data.page is not None:
            params["errors_offset"] = (data.page - 1) * data.limit
            params["errors_limit"] = data.limit
        else:
            params["errors_offset"] = 0
            params["errors_limit"] = 200

        if error_ids is not None:
            params["error_ids"] = tuple(error_ids)
            pg_sub_query.append("error_id IN %(error_ids)s")
        # if data.bookmarked:
        #     pg_sub_query.append("ufe.user_id = %(userId)s")
        #     extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
        if data.query is not None and len(data.query) > 0:
            pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
            params["error_query"] = helper.values_for_operator(value=data.query,
                                                               op=schemas.SearchEventOperator._contains)

        main_pg_query = f"""SELECT full_count,
                                   error_id,
                                   name,
                                   message,
                                   users,
                                   sessions,
                                   last_occurrence,
                                   first_occurrence,
                                   chart
                            FROM (SELECT COUNT(details) OVER () AS full_count, details.*
                                  FROM (SELECT error_id,
                                               name,
                                               message,
                                               COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
                                               COUNT(DISTINCT session_id)                        AS sessions,
                                               MAX(timestamp)                                    AS max_datetime,
                                               MIN(timestamp)                                    AS min_datetime
                                        FROM events.errors
                                                 INNER JOIN public.errors AS pe USING (error_id)
                                                 INNER JOIN public.sessions USING (session_id)
                                            {extra_join}
                                        WHERE {" AND ".join(pg_sub_query)}
                                        GROUP BY error_id, name, message
                                        ORDER BY {sort} {order}) AS details
                                  LIMIT %(errors_limit)s OFFSET %(errors_offset)s
                                 ) AS details
                                     INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
                                                                MIN(timestamp) AS first_occurrence
                                                         FROM events.errors
                                                         WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
                                     INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
                                                         FROM (SELECT generated_timestamp AS timestamp,
                                                                      COUNT(session_id)   AS count
                                                               FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
                                                                        LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                                                           FROM events.errors
                                                                                           WHERE {" AND ".join(pg_sub_query_chart)}
                                                                            ) AS sessions ON (TRUE)
                                                               GROUP BY timestamp
                                                               ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""

        # print("--------------------")
        # print(cur.mogrify(main_pg_query, params))
        # print("--------------------")

        cur.execute(cur.mogrify(main_pg_query, params))
        rows = cur.fetchall()
        total = 0 if len(rows) == 0 else rows[0]["full_count"]

        if total == 0:
            rows = []
        else:
            if len(statuses) == 0:
                query = cur.mogrify(
                    """SELECT error_id,
                              COALESCE((SELECT TRUE
                                        FROM public.user_viewed_errors AS ve
                                        WHERE errors.error_id = ve.error_id
                                          AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
                       FROM public.errors
                       WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
                    {"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
                     "user_id": user_id})
                cur.execute(query=query)
                statuses = helper.list_to_camel_case(cur.fetchall())
            statuses = {
                s["errorId"]: s for s in statuses
            }

            for r in rows:
                r.pop("full_count")
                if r["error_id"] in statuses:
                    r["viewed"] = statuses[r["error_id"]]["viewed"]
                else:
                    r["viewed"] = False

        return {
            'total': total,
            'errors': helper.list_to_camel_case(rows)
        }

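One detail worth calling out in `search`: pagination and the total count come back in a single round trip via a window function. A reduced sketch of the pattern:

```python
# COUNT(...) OVER () attaches the pre-LIMIT row count to every returned row,
# so `total` can be read from any row without a second COUNT(*) query.
paged_sketch = """
SELECT COUNT(details) OVER () AS full_count, details.*
FROM (SELECT error_id, COUNT(DISTINCT session_id) AS sessions
      FROM events.errors
      GROUP BY error_id
      ORDER BY sessions DESC) AS details
LIMIT %(errors_limit)s OFFSET %(errors_offset)s;
"""
# This is why the post-processing loop above calls r.pop("full_count") on
# each row before handing the list back to the API layer.
```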
def __save_stacktrace(error_id, data):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
               WHERE error_id = %(error_id)s;""",
            {"error_id": error_id, "data": json.dumps(data)})
        cur.execute(query=query)


def get_trace(project_id, error_id):
    error = get(error_id=error_id, family=False)
    if error is None:
        return {"errors": ["error not found"]}
    if error.get("source", "") != "js_exception":
        return {"errors": ["this source of errors doesn't have a sourcemap"]}
    if error.get("payload") is None:
        return {"errors": ["null payload"]}
    if error.get("stacktrace") is not None:
        return {"sourcemapUploaded": True,
                "trace": error.get("stacktrace"),
                "preparsed": True}
    trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
    if all_exists:
        __save_stacktrace(error_id=error_id, data=trace)
    return {"sourcemapUploaded": all_exists,
            "trace": trace,
            "preparsed": False}


def get_sessions(start_date, end_date, project_id, user_id, error_id):
    extra_constraints = ["s.project_id = %(project_id)s",
                         "s.start_ts >= %(startDate)s",
                         "s.start_ts <= %(endDate)s",
                         "e.error_id = %(error_id)s"]
    if start_date is None:
        start_date = TimeUTC.now(-7)
    if end_date is None:
        end_date = TimeUTC.now()

    params = {
        "startDate": start_date,
        "endDate": end_date,
        "project_id": project_id,
        "userId": user_id,
        "error_id": error_id}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT s.project_id,
                       s.session_id::text AS session_id,
                       s.user_uuid,
                       s.user_id,
                       s.user_agent,
                       s.user_os,
                       s.user_browser,
                       s.user_device,
                       s.user_country,
                       s.start_ts,
                       s.duration,
                       s.events_count,
                       s.pages_count,
                       s.errors_count,
                       s.issue_types,
                       COALESCE((SELECT TRUE
                                 FROM public.user_favorite_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
                       COALESCE((SELECT TRUE
                                 FROM public.user_viewed_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
                FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
                WHERE {" AND ".join(extra_constraints)}
                ORDER BY s.start_ts DESC;""",
            params)
        cur.execute(query=query)
        sessions_list = []
        total = cur.rowcount
        row = cur.fetchone()
        while row is not None and len(sessions_list) < 100:
            sessions_list.append(row)
            row = cur.fetchone()

        return {
            'total': total,
            'sessions': helper.list_to_camel_case(sessions_list)
        }

ACTION_STATE = {
    "unsolve": 'unresolved',
    "solve": 'resolved',
    "ignore": 'ignored'
}


def change_state(project_id, user_id, error_id, action):
    errors = get(error_id, family=True)
    print(len(errors))
    status = ACTION_STATE.get(action)
    if errors is None or len(errors) == 0:
        return {"errors": ["error not found"]}
    if errors[0]["status"] == status:
        return {"errors": [f"error is already {status}"]}

    if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
        return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}

    params = {
        "userId": user_id,
        "error_ids": tuple([e["errorId"] for e in errors]),
        "status": status}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET status = %(status)s
               WHERE error_id IN %(error_ids)s
               RETURNING status""",
            params)
        cur.execute(query=query)
        row = cur.fetchone()
        if row is not None:
            for e in errors:
                e["status"] = row["status"]
    return {"data": errors}
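The transitions that `change_state` permits can be derived from its guards: same-state updates are rejected, and resolved -> ignored is explicitly forbidden; everything else passes. A sketch encoding exactly that rule set:

```python
# Derived from the guards in change_state above; not part of the diff.
ALLOWED = {
    ("unresolved", "resolved"), ("unresolved", "ignored"),
    ("resolved", "unresolved"),
    ("ignored", "unresolved"), ("ignored", "resolved"),
}

def is_allowed(current: str, target: str) -> bool:
    # current == target is already excluded because ALLOWED has no such pairs.
    return (current, target) in ALLOWED
```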
@@ -1,13 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

from . import errors_pg as errors_legacy

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    logger.info(">>> Using experimental error search")
    from . import errors_ch as errors
else:
    from . import errors_pg as errors
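For context on the deleted shim: `decouple.config` reads settings from the environment or a `.env` file, and `cast=bool` interprets string values as booleans. The same toggle idiom, reduced to essentials:

```python
# Same pattern as the deleted __init__ shim; names here are illustrative.
# EXP_ERRORS_SEARCH comes from the environment; default=False keeps the
# legacy backend unless the flag is explicitly enabled.
from decouple import config

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    backend = "clickhouse"  # experimental path (was errors_ch)
else:
    backend = "postgres"    # legacy path (was errors_pg)
```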
@@ -1,409 +0,0 @@
import schemas
from chalicelib.core import metadata
from chalicelib.core.errors import errors_legacy
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC


def _multiple_values(values, value_key="value"):
    query_values = {}
    if values is not None and isinstance(values, list):
        for i in range(len(values)):
            k = f"{value_key}_{i}"
            query_values[k] = values[i]
    return query_values


def __get_sql_operator(op: schemas.SearchEventOperator):
    return {
        schemas.SearchEventOperator.IS: "=",
        schemas.SearchEventOperator.IS_ANY: "IN",
        schemas.SearchEventOperator.ON: "=",
        schemas.SearchEventOperator.ON_ANY: "IN",
        schemas.SearchEventOperator.IS_NOT: "!=",
        schemas.SearchEventOperator.NOT_ON: "!=",
        schemas.SearchEventOperator.CONTAINS: "ILIKE",
        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
    }.get(op, "=")


def _isAny_opreator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]


def _isUndefined_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_UNDEFINED]


def __is_negation_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_NOT,
                  schemas.SearchEventOperator.NOT_ON,
                  schemas.SearchEventOperator.NOT_CONTAINS]


def _multiple_conditions(condition, values, value_key="value", is_not=False):
    query = []
    for i in range(len(values)):
        k = f"{value_key}_{i}"
        query.append(condition.replace(value_key, k))
    return "(" + (" AND " if is_not else " OR ").join(query) + ")"


def get(error_id, family=False):
    return errors_legacy.get(error_id=error_id, family=family)


def get_batch(error_ids):
    return errors_legacy.get_batch(error_ids=error_ids)

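To see how `_multiple_values` and `_multiple_conditions` cooperate, a worked expansion with illustrative values:

```python
# For values=["Chrome", "Firefox"] and value_key="f_value0":
#   _multiple_values(values, value_key="f_value0")
#     -> {"f_value0_0": "Chrome", "f_value0_1": "Firefox"}
#   _multiple_conditions("s.user_browser = %(f_value0)s", values,
#                        value_key="f_value0")
#     -> "(s.user_browser = %(f_value0_0)s OR s.user_browser = %(f_value0_1)s)"
# With is_not=True the joiner flips to AND, so a negated filter must hold
# against every supplied value at once.
```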
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
                                   endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                                   table_name=None):
    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
    # if platform == schemas.PlatformType.MOBILE:
    #     ch_sub_query.append("user_device_type = 'mobile'")
    # elif platform == schemas.PlatformType.DESKTOP:
    #     ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_sort_key(key):
    return {
        schemas.ErrorSort.OCCURRENCE: "max_datetime",
        schemas.ErrorSort.USERS_COUNT: "users",
        schemas.ErrorSort.SESSIONS_COUNT: "sessions"
    }.get(key, 'max_datetime')

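A sketch of what `__get_basic_constraints_events(None, type_condition=True)` (the call used in `search` below) evaluates to, following the branches above:

```python
# With no table_name the column prefix is empty; timestamps arrive in
# milliseconds, hence the /1000 before ClickHouse's toDateTime.
constraints = [
    "project_id =toUInt16(%(project_id)s)",
    "`$event_name`='ERROR'",
    "created_at >= toDateTime(%(startDate)s/1000)",
    "created_at < toDateTime(%(endDate)s/1000)",
]
```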
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
    MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
    MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)

    platform = None
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
            platform = f.value[0]
    ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
    # ignore platform for errors table
    ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
    ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")

    # To ignore Script error
    ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
    error_ids = None

    if data.startTimestamp is None:
        data.startTimestamp = TimeUTC.now(-7)
    if data.endTimestamp is None:
        data.endTimestamp = TimeUTC.now(1)

    subquery_part = ""
    params = {}
    if len(data.events) > 0:
        errors_condition_count = 0
        for i, e in enumerate(data.events):
            if e.type == schemas.EventType.ERROR:
                errors_condition_count += 1
                is_any = _isAny_opreator(e.operator)
                op = __get_sql_operator(e.operator)
                e_k = f"e_value{i}"
                params = {**params, **_multiple_values(e.value, value_key=e_k)}
                if not is_any and len(e.value) > 0 and e.value[1] not in [None, "*", ""]:
                    ch_sub_query.append(
                        _multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
                                             e.value, value_key=e_k))
        if len(data.events) > errors_condition_count:
            subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
                                                                               errors_only=True,
                                                                               project_id=project.project_id,
                                                                               user_id=user_id,
                                                                               issue=None,
                                                                               favorite_only=False)
            subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
            params = {**params, **subquery_part_args}
    if len(data.filters) > 0:
        meta_keys = None
        # include a sub-query of sessions inside the events query, in order to reduce the selected data
        for i, f in enumerate(data.filters):
            if not isinstance(f.value, list):
                f.value = [f.value]
            filter_type = f.type
            f.value = helper.values_for_operator(value=f.value, op=f.operator)
            f_k = f"f_value{i}"
            params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
            op = __get_sql_operator(f.operator) \
                if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
            is_any = _isAny_opreator(f.operator)
            is_undefined = _isUndefined_operator(f.operator)
            if not is_any and not is_undefined and len(f.value) == 0:
                continue
            is_not = False
            if __is_negation_operator(f.operator):
                is_not = True
            if filter_type == schemas.FilterType.USER_BROWSER:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_browser)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_os)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_device)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_country)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.UTM_SOURCE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_source)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_source)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_medium)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))
            elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_campaign)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type == schemas.FilterType.DURATION:
                if len(f.value) > 0 and f.value[0] is not None:
                    ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
                    params["minDuration"] = f.value[0]
                if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
                    ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
                    params["maxDuration"] = f.value[1]

            elif filter_type == schemas.FilterType.REFERRER:
                # extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
                if is_any:
                    referrer_constraint = 'isNotNull(s.base_referrer)'
                else:
                    referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
                                                               is_not=is_not, value_key=f_k)
            elif filter_type == schemas.FilterType.METADATA:
                # get metadata list only if you need it
                if meta_keys is None:
                    meta_keys = metadata.get(project_id=project.project_id)
                    meta_keys = {m["key"]: m["index"] for m in meta_keys}
                if f.source in meta_keys.keys():
                    if is_any:
                        ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
                    elif is_undefined:
                        ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
                    else:
                        ch_sessions_sub_query.append(
                            _multiple_conditions(
                                f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
                                f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.user_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
                                             value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
                                 schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
                                             is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.rev_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.rev_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type == schemas.FilterType.PLATFORM:
                # op = __get_sql_operator(f.operator)
                ch_sessions_sub_query.append(
                    _multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
                                         value_key=f_k))
            # elif filter_type == schemas.FilterType.issue:
            #     if is_any:
            #         ch_sessions_sub_query.append("notEmpty(s.issue_types)")
            #     else:
            #         ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
            #         # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
            #         #                      value_key=f_k))
            #
            #     if is_not:
            #         extra_constraints[-1] = f"not({extra_constraints[-1]})"
            #         ss_constraints[-1] = f"not({ss_constraints[-1]})"
            elif filter_type == schemas.FilterType.EVENTS_COUNT:
                ch_sessions_sub_query.append(
                    _multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
                                         value_key=f_k))

    with ch_client.ClickHouseClient() as ch:
        step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
        sort = __get_sort_key('datetime')
        if data.sort is not None:
            sort = __get_sort_key(data.sort)
        order = "DESC"
        if data.order is not None:
            order = data.order
        params = {
            **params,
            "startDate": data.startTimestamp,
            "endDate": data.endTimestamp,
            "project_id": project.project_id,
            "userId": user_id,
            "step_size": step_size}
        if data.limit is not None and data.page is not None:
            params["errors_offset"] = (data.page - 1) * data.limit
            params["errors_limit"] = data.limit
        else:
            params["errors_offset"] = 0
            params["errors_limit"] = 200
        # if data.bookmarked:
        #     cur.execute(cur.mogrify(f"""SELECT error_id
        #                                 FROM public.user_favorite_errors
        #                                 WHERE user_id = %(userId)s
        #                                 {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
        #                             {"userId": user_id, "error_ids": tuple(error_ids or [])}))
        #     error_ids = cur.fetchall()
        #     if len(error_ids) == 0:
        #         return empty_response
        #     error_ids = [e["error_id"] for e in error_ids]

        if error_ids is not None:
            params["error_ids"] = tuple(error_ids)
            ch_sub_query.append("error_id IN %(error_ids)s")

        main_ch_query = f"""\
        SELECT details.error_id as error_id,
               name, message, users, total,
               sessions, last_occurrence, first_occurrence, chart
        FROM (SELECT error_id,
                     JSONExtractString(toString(`$properties`), 'name')    AS name,
                     JSONExtractString(toString(`$properties`), 'message') AS message,
                     COUNT(DISTINCT user_id)           AS users,
                     COUNT(DISTINCT events.session_id) AS sessions,
                     MAX(created_at)                   AS max_datetime,
                     MIN(created_at)                   AS min_datetime,
                     COUNT(DISTINCT error_id)
                           OVER() AS total
              FROM {MAIN_EVENTS_TABLE} AS events
                       INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
                                   FROM {MAIN_SESSIONS_TABLE} AS s
                                       {subquery_part}
                                   WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
                                  ON (events.session_id = sessions.session_id)
              WHERE {" AND ".join(ch_sub_query)}
              GROUP BY error_id, name, message
              ORDER BY {sort} {order}
              LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
                 INNER JOIN (SELECT error_id,
                                    toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
                                    toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
                             FROM {MAIN_EVENTS_TABLE}
                             WHERE project_id=%(project_id)s
                               AND `$event_name`='ERROR'
                             GROUP BY error_id) AS time_details
                            ON details.error_id=time_details.error_id
                 INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
                             FROM (SELECT error_id,
                                          gs.generate_series        AS timestamp,
                                          COUNT(DISTINCT session_id) AS count
                                   FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
                                            LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
                                   WHERE {" AND ".join(ch_sub_query)}
                                     AND created_at >= toDateTime(timestamp / 1000)
                                     AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
                                   GROUP BY error_id, timestamp
                                   ORDER BY timestamp) AS sub_table
                             GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""

        # print("------------")
        # print(ch.format(main_ch_query, params))
        # print("------------")
        query = ch.format(query=main_ch_query, parameters=params)

        rows = ch.execute(query=query)
        total = rows[0]["total"] if len(rows) > 0 else 0

        for r in rows:
            r["chart"] = list(r["chart"])
            for i in range(len(r["chart"])):
                r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}

        return {
            'total': total,
            'errors': helper.list_to_camel_case(rows)
        }


def get_trace(project_id, error_id):
    return errors_legacy.get_trace(project_id=project_id, error_id=error_id)


def get_sessions(start_date, end_date, project_id, user_id, error_id):
    return errors_legacy.get_sessions(start_date=start_date,
                                      end_date=end_date,
                                      project_id=project_id,
                                      user_id=user_id,
                                      error_id=error_id)
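The ClickHouse query returns each chart as `groupArray([timestamp, count])`, i.e. a list of two-element lists, which the loop above reshapes into dicts. In isolation, with illustrative data:

```python
# Shape returned by groupArray([timestamp, count]) for one error:
row = {"chart": [[1700000000000, 3], [1700000600000, 0]]}
row["chart"] = [{"timestamp": ts, "count": n} for ts, n in row["chart"]]
# -> [{"timestamp": 1700000000000, "count": 3},
#     {"timestamp": 1700000600000, "count": 0}]
```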
@@ -1,248 +0,0 @@
from chalicelib.core.errors.modules import errors_helper

from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size


def __flatten_sort_key_count_version(data, merge_nested=False):
    if data is None:
        return []
    return sorted(
        [
            {
                "name": f'{o["name"]}@{v["version"]}',
                "count": v["count"]
            } for o in data for v in o["partition"]
        ],
        key=lambda o: o["count"], reverse=True) if merge_nested else \
        [
            {
                "name": o["name"],
                "count": o["count"],
            } for o in data
        ]


def __process_tags(row):
    return [
        {"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
        {"name": "browser.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
        {"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
        {"name": "OS.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
        {"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
        {"name": "device",
         "partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
        {"name": "country", "partitions": row.pop("country_partition")}
    ]


def get_details(project_id, error_id, user_id, **data):
    pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
                                                           step_size_name="step_size24")
    pg_sub_query24.append("error_id = %(error_id)s")
    pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
                                                                   startTime_arg_name="startDate30",
                                                                   endTime_arg_name="endDate30",
                                                                   project_key="sessions.project_id")
    pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_session.append("error_id = %(error_id)s")
    pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
                                                               startTime_arg_name="startDate30",
                                                               endTime_arg_name="endDate30",
                                                               project_key="errors.project_id")
    pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
    pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_err.append("error_id = %(error_id)s")
    pg_sub_query30_err.append("source ='js_exception'")
    pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
                                                           step_size_name="step_size30")
    pg_sub_query30.append("error_id = %(error_id)s")
    pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
    pg_basic_query.append("error_id = %(error_id)s")
    with pg_client.PostgresClient() as cur:
        data["startDate24"] = TimeUTC.now(-1)
        data["endDate24"] = TimeUTC.now()
        data["startDate30"] = TimeUTC.now(-30)
        data["endDate30"] = TimeUTC.now()
        density24 = int(data.get("density24", 24))
        step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
        density30 = int(data.get("density30", 30))
        step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
        params = {
            "startDate24": data['startDate24'],
            "endDate24": data['endDate24'],
            "startDate30": data['startDate30'],
            "endDate30": data['endDate30'],
            "project_id": project_id,
            "userId": user_id,
            "step_size24": step_size24,
            "step_size30": step_size30,
            "error_id": error_id}

        main_pg_query = f"""\
            SELECT error_id,
                   name,
                   message,
                   users,
                   sessions,
                   last_occurrence,
                   first_occurrence,
                   last_session_id,
                   browsers_partition,
                   os_partition,
                   device_partition,
                   country_partition,
                   chart24,
                   chart30
            FROM (SELECT error_id,
                         name,
                         message,
                         COUNT(DISTINCT user_id)    AS users,
                         COUNT(DISTINCT session_id) AS sessions
                  FROM public.errors
                           INNER JOIN events.errors AS s_errors USING (error_id)
                           INNER JOIN public.sessions USING (session_id)
                  WHERE {" AND ".join(pg_sub_query30_err)}
                  GROUP BY error_id, name, message) AS details
                     INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
                                        MIN(timestamp) AS first_occurrence
                                 FROM events.errors
                                 WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
                     INNER JOIN (SELECT session_id AS last_session_id
                                 FROM events.errors
                                 WHERE error_id = %(error_id)s
                                 ORDER BY errors.timestamp DESC
                                 LIMIT 1) AS last_session_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_browser      AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors
                                                      INNER JOIN sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_browser
                                             ORDER BY count DESC) AS count_per_browser_query
                                                INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
                                                                    FROM (SELECT user_browser_version AS version,
                                                                                 COUNT(session_id)    AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query30_session)}
                                                                            AND sessions.user_browser = count_per_browser_query.name
                                                                          GROUP BY user_browser_version
                                                                          ORDER BY count DESC) AS version_details
                                     ) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_os           AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_os
                                             ORDER BY count DESC) AS count_per_os_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
                                                                    FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
                                                                          FROM events.errors INNER JOIN public.sessions USING (session_id)
                                                                          WHERE {" AND ".join(pg_sub_query30_session)}
                                                                            AND sessions.user_os = count_per_os_details.name
                                                                          GROUP BY user_os_version
                                                                          ORDER BY count DESC) AS count_per_version_details
                                                                    GROUP BY count_per_os_details.name ) AS os_version_details
                                     ON (TRUE)) AS os_details) AS os_details ON (TRUE)
                     INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
                                 FROM (SELECT *
                                       FROM (SELECT user_device_type  AS name,
                                                    COUNT(session_id) AS count
                                             FROM events.errors INNER JOIN public.sessions USING (session_id)
                                             WHERE {" AND ".join(pg_sub_query30_session)}
                                             GROUP BY user_device_type
                                             ORDER BY count DESC) AS count_per_device_details
                                                INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
                                                                    FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device
|
||||
ORDER BY count DESC) AS count_per_device_v_details
|
||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query24)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||
ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||
"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
|
||||
query = cur.mogrify(
|
||||
f"""SELECT error_id, status, session_id, start_ts,
|
||||
parent_error_id,session_id, user_anonymous_id,
|
||||
user_id, user_uuid, user_browser, user_browser_version,
|
||||
user_os, user_os_version, user_device, payload,
|
||||
FALSE AS favorite,
|
||||
True AS viewed
|
||||
FROM public.errors AS pe
|
||||
INNER JOIN events.errors AS ee USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE pe.project_id = %(project_id)s
|
||||
AND error_id = %(error_id)s
|
||||
ORDER BY start_ts DESC
|
||||
LIMIT 1;""",
|
||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
status = cur.fetchone()
|
||||
|
||||
if status is not None:
|
||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||
row["status"] = status.pop("status")
|
||||
row["parent_error_id"] = status.pop("parent_error_id")
|
||||
row["favorite"] = status.pop("favorite")
|
||||
row["viewed"] = status.pop("viewed")
|
||||
row["last_hydrated_session"] = status
|
||||
else:
|
||||
row["stack"] = []
|
||||
row["last_hydrated_session"] = None
|
||||
row["status"] = "untracked"
|
||||
row["parent_error_id"] = None
|
||||
row["favorite"] = False
|
||||
row["viewed"] = False
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
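A usage sketch for get_details; the ids and density knobs below are illustrative. Everything is computed in one round trip, so the result carries the tag partitions and both histograms at once.

details = get_details(project_id=1, error_id="e-123", user_id=7,
                      density24=24, density30=30)
if "errors" not in details:
    data = details["data"]
    tags = data["tags"]          # browser/OS/device/country partitions
    chart24 = data["chart24"]    # 24h histogram, one bucket per step_size24
    chart30 = data["chart30"]    # 30-day histogram, one bucket per step_size30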
|
@@ -1,294 +0,0 @@
|
|||
import json
|
||||
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.core.sessions import sessions_search
|
||||
from chalicelib.core.sourcemaps import sourcemaps
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
||||
|
||||
def get(error_id, family=False) -> dict | List[dict]:
|
||||
if family:
|
||||
return get_batch([error_id])
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id = %(error_id)s
|
||||
LIMIT 1;""",
|
||||
{"error_id": error_id})
|
||||
cur.execute(query=query)
|
||||
result = cur.fetchone()
|
||||
if result is not None:
|
||||
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
||||
def get_batch(error_ids):
|
||||
if len(error_ids) == 0:
|
||||
return []
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""
|
||||
WITH RECURSIVE error_family AS (
|
||||
SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id IN %(error_ids)s
|
||||
UNION
|
||||
SELECT child_errors.*
|
||||
FROM public.errors AS child_errors
|
||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
||||
)
|
||||
SELECT *
|
||||
FROM error_family;""",
|
||||
{"error_ids": tuple(error_ids)})
|
||||
cur.execute(query=query)
|
||||
errors = cur.fetchall()
|
||||
for e in errors:
|
||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
||||
|
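The recursive CTE above walks parent/child links in both directions until the error family is closed. A pure-Python restatement of the same fixpoint, for intuition only (names illustrative; seed ids are assumed to exist in the map):

def error_family(errors_by_id, seed_ids):
    # errors_by_id: {error_id: {"error_id": ..., "parent_error_id": ...}}
    family = set(seed_ids)
    frontier = set(seed_ids)
    while frontier:
        step = set()
        for e in errors_by_id.values():
            if e.get("parent_error_id") in frontier:   # walk down to children
                step.add(e["error_id"])
        for i in frontier:                             # walk up to parents
            parent = errors_by_id[i].get("parent_error_id")
            if parent in errors_by_id:
                step.add(parent)
        frontier = step - family
        family |= frontier
    return [errors_by_id[i] for i in family]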
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||
schemas.ErrorSort.USERS_COUNT: "users",
|
||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
||||
}.get(key, 'max_datetime')
|
||||
|
||||
|
||||
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
||||
empty_response = {
|
||||
'total': 0,
|
||||
'errors': []
|
||||
}
|
||||
|
||||
platform = None
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||
"pe.project_id=%(project_id)s"]
|
||||
# To ignore Script error
|
||||
pg_sub_query.append("pe.message!='Script error.'")
|
||||
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
|
||||
project_key=None)
|
||||
if platform:
|
||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||
statuses = []
|
||||
error_ids = None
|
||||
if data.startTimestamp is None:
|
||||
data.startTimestamp = TimeUTC.now(-30)
|
||||
if data.endTimestamp is None:
|
||||
data.endTimestamp = TimeUTC.now(1)
|
||||
if len(data.events) > 0 or len(data.filters) > 0:
|
||||
print("-- searching for sessions before errors")
|
||||
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
|
||||
error_status=data.status)
|
||||
if len(statuses) == 0:
|
||||
return empty_response
|
||||
error_ids = [e["errorId"] for e in statuses]
|
||||
with pg_client.PostgresClient() as cur:
|
||||
step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
||||
sort = __get_sort_key('datetime')
|
||||
if data.sort is not None:
|
||||
sort = __get_sort_key(data.sort)
|
||||
order = schemas.SortOrderType.DESC
|
||||
if data.order is not None:
|
||||
order = data.order
|
||||
extra_join = ""
|
||||
|
||||
params = {
|
||||
"startDate": data.startTimestamp,
|
||||
"endDate": data.endTimestamp,
|
||||
"project_id": project.project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size}
|
||||
if data.status != schemas.ErrorStatus.ALL:
|
||||
pg_sub_query.append("status = %(error_status)s")
|
||||
params["error_status"] = data.status
|
||||
if data.limit is not None and data.page is not None:
|
||||
params["errors_offset"] = (data.page - 1) * data.limit
|
||||
params["errors_limit"] = data.limit
|
||||
else:
|
||||
params["errors_offset"] = 0
|
||||
params["errors_limit"] = 200
|
||||
|
||||
if error_ids is not None:
|
||||
params["error_ids"] = tuple(error_ids)
|
||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||
# if data.bookmarked:
|
||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
|
||||
main_pg_query = f"""SELECT full_count,
|
||||
error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
chart
|
||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions,
|
||||
MAX(timestamp) AS max_datetime,
|
||||
MIN(timestamp) AS min_datetime
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS pe USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
{extra_join}
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}) AS details
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||
) AS details
|
||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
||||
|
||||
if total == 0:
|
||||
rows = []
|
||||
else:
|
||||
if len(statuses) == 0:
|
||||
query = cur.mogrify(
|
||||
"""SELECT error_id
|
||||
FROM public.errors
|
||||
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
||||
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
||||
"user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
statuses = helper.list_to_camel_case(cur.fetchall())
|
||||
statuses = {
|
||||
s["errorId"]: s for s in statuses
|
||||
}
|
||||
|
||||
for r in rows:
|
||||
r.pop("full_count")
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'errors': helper.list_to_camel_case(rows)
|
||||
}
|
||||
|
||||
|
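COUNT(details) OVER () attaches the pre-LIMIT total to every returned row, which is why the code reads it from rows[0] and then strips the helper column. The consuming pattern in isolation (sample rows, values illustrative):

rows = [{"full_count": 42, "error_id": "a"},
        {"full_count": 42, "error_id": "b"}]
total = 0 if len(rows) == 0 else rows[0]["full_count"]
for r in rows:
    r.pop("full_count")   # drop the window-function column before returning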
||||
def __save_stacktrace(error_id, data):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.errors
|
||||
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
||||
WHERE error_id = %(error_id)s;""",
|
||||
{"error_id": error_id, "data": json.dumps(data)})
|
||||
cur.execute(query=query)
|
||||
|
||||
|
||||
def get_trace(project_id, error_id):
|
||||
error = get(error_id=error_id, family=False)
|
||||
if error is None:
|
||||
return {"errors": ["error not found"]}
|
||||
if error.get("source", "") != "js_exception":
|
||||
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
||||
if error.get("payload") is None:
|
||||
return {"errors": ["null payload"]}
|
||||
if error.get("stacktrace") is not None:
|
||||
return {"sourcemapUploaded": True,
|
||||
"trace": error.get("stacktrace"),
|
||||
"preparsed": True}
|
||||
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
||||
if all_exists:
|
||||
__save_stacktrace(error_id=error_id, data=trace)
|
||||
return {"sourcemapUploaded": all_exists,
|
||||
"trace": trace,
|
||||
"preparsed": False}
|
||||
|
||||
|
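get_trace caches lazily: it persists a parsed trace via __save_stacktrace only when every sourcemap resolved, so partial parses are retried on the next call. Return-contract sketch (payloads illustrative):

get_trace(project_id=1, error_id="e-123")
# cached:           {"sourcemapUploaded": True,  "trace": [...], "preparsed": True}
# parsed, all maps: {"sourcemapUploaded": True,  "trace": [...], "preparsed": False}
# maps missing:     {"sourcemapUploaded": False, "trace": [...], "preparsed": False}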
||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||
extra_constraints = ["s.project_id = %(project_id)s",
|
||||
"s.start_ts >= %(startDate)s",
|
||||
"s.start_ts <= %(endDate)s",
|
||||
"e.error_id = %(error_id)s"]
|
||||
if start_date is None:
|
||||
start_date = TimeUTC.now(-7)
|
||||
if end_date is None:
|
||||
end_date = TimeUTC.now()
|
||||
|
||||
params = {
|
||||
"startDate": start_date,
|
||||
"endDate": end_date,
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"error_id": error_id}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT s.project_id,
|
||||
s.session_id::text AS session_id,
|
||||
s.user_uuid,
|
||||
s.user_id,
|
||||
s.user_agent,
|
||||
s.user_os,
|
||||
s.user_browser,
|
||||
s.user_device,
|
||||
s.user_country,
|
||||
s.start_ts,
|
||||
s.duration,
|
||||
s.events_count,
|
||||
s.pages_count,
|
||||
s.errors_count,
|
||||
s.issue_types,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_favorite_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_sessions AS fs
|
||||
WHERE s.session_id = fs.session_id
|
||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
||||
WHERE {" AND ".join(extra_constraints)}
|
||||
ORDER BY s.start_ts DESC;""",
|
||||
params)
|
||||
cur.execute(query=query)
|
||||
sessions_list = []
|
||||
total = cur.rowcount
|
||||
row = cur.fetchone()
|
||||
while row is not None and len(sessions_list) < 100:
|
||||
sessions_list.append(row)
|
||||
row = cur.fetchone()
|
||||
|
||||
return {
|
||||
'total': total,
|
||||
'sessions': helper.list_to_camel_case(sessions_list)
|
||||
}
|
||||
|
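Note the asymmetry above: total reflects cur.rowcount for the whole result set, while the fetch loop stops after 100 rows. The same cap expressed with the DB-API batch fetch (equivalent sketch):

total = cur.rowcount
sessions_list = cur.fetchmany(100)   # at most 100 rows, like the while-loop above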
|
@@ -1,11 +0,0 @@
|
|||
import logging
|
||||
|
||||
from decouple import config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
from . import helper as errors_helper
|
||||
|
||||
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
|
||||
import chalicelib.core.sessions.sessions_ch as sessions
|
||||
else:
|
||||
import chalicelib.core.sessions.sessions_pg as sessions
|
||||
|
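The flag switch above selects the sessions backend once at import time, so call sites stay identical. The same pattern in isolation (module names here are illustrative, not the real paths):

from decouple import config

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    import sessions_clickhouse as sessions   # hypothetical ClickHouse backend
else:
    import sessions_postgres as sessions     # hypothetical Postgres backend
# callers use sessions.<fn>(...) regardless of which branch ran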
|
@@ -1,58 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.sourcemaps import sourcemaps
|
||||
|
||||
|
||||
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
|
||||
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
|
||||
chart: bool = False, step_size_name: str = "step_size",
|
||||
project_key: Optional[str] = "project_id"):
|
||||
if project_key is None:
|
||||
ch_sub_query = []
|
||||
else:
|
||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
||||
f"timestamp < %({endTime_arg_name})s"]
|
||||
if chart:
|
||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
||||
if platform == schemas.PlatformType.MOBILE:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.DESKTOP:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
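__get_basic_constraints only assembles predicate strings; callers join them with " AND " into a WHERE clause. Expected output for a mobile query with the default argument names (illustrative):

__get_basic_constraints(platform=schemas.PlatformType.MOBILE)
# -> ["project_id =%(project_id)s",
#     "timestamp >= %(startDate)s",
#     "timestamp < %(endDate)s",
#     "user_device_type = 'mobile'"]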
||||
def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
|
||||
table_name=None):
|
||||
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
|
||||
if table_name is not None:
|
||||
table_name = table_name + "."
|
||||
else:
|
||||
table_name = ""
|
||||
if type_condition:
|
||||
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
|
||||
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
|
||||
if platform == schemas.PlatformType.MOBILE:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.DESKTOP:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
||||
def format_first_stack_frame(error):
|
||||
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
|
||||
for s in error["stack"]:
|
||||
for c in s.get("context", []):
|
||||
for sci, sc in enumerate(c):
|
||||
if isinstance(sc, str) and len(sc) > 1000:
|
||||
c[sci] = sc[:1000]
|
||||
# convert bytes to string:
|
||||
if isinstance(s["filename"], bytes):
|
||||
s["filename"] = s["filename"].decode("utf-8")
|
||||
return error
|
||||
48
api/chalicelib/core/errors_favorite.py
Normal file
|
|
@@ -0,0 +1,48 @@
|
|||
from chalicelib.utils import pg_client
|
||||
|
||||
|
||||
def add_favorite_error(project_id, user_id, error_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
|
||||
VALUES (%(userId)s,%(error_id)s);""",
|
||||
{"userId": user_id, "error_id": error_id})
|
||||
)
|
||||
return {"errorId": error_id, "favorite": True}
|
||||
|
||||
|
||||
def remove_favorite_error(project_id, user_id, error_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""DELETE FROM public.user_favorite_errors
|
||||
WHERE
|
||||
user_id = %(userId)s
|
||||
AND error_id = %(error_id)s;""",
|
||||
{"userId": user_id, "error_id": error_id})
|
||||
)
|
||||
return {"errorId": error_id, "favorite": False}
|
||||
|
||||
|
||||
def favorite_error(project_id, user_id, error_id):
|
||||
exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
|
||||
if not exists:
|
||||
return {"errors": ["cannot bookmark non-rehydrated errors"]}
|
||||
if favorite:
|
||||
return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||
return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||
|
||||
|
||||
def error_exists_and_favorite(user_id, error_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT errors.error_id AS exists, ufe.error_id AS favorite
|
||||
FROM public.errors
|
||||
LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
|
||||
WHERE error_id = %(error_id)s;""",
|
||||
{"userId": user_id, "error_id": error_id})
|
||||
)
|
||||
r = cur.fetchone()
|
||||
if r is None:
|
||||
return False, False
|
||||
return True, r.get("favorite") is not None
|
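favorite_error is a toggle over the user_favorite_errors join table, guarded so only rehydrated errors (rows present in public.errors) can be bookmarked. Illustrative call sequence:

favorite_error(project_id=1, user_id=7, error_id="e-123")
# -> {"errorId": "e-123", "favorite": True}   (row inserted)
favorite_error(project_id=1, user_id=7, error_id="e-123")
# -> {"errorId": "e-123", "favorite": False}  (row deleted)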
||||
37
api/chalicelib/core/errors_viewed.py
Normal file
|
|
@@ -0,0 +1,37 @@
|
|||
from chalicelib.utils import pg_client
|
||||
|
||||
|
||||
def add_viewed_error(project_id, user_id, error_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
|
||||
VALUES (%(userId)s,%(error_id)s);""",
|
||||
{"userId": user_id, "error_id": error_id})
|
||||
)
|
||||
|
||||
|
||||
def viewed_error_exists(user_id, error_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""SELECT
|
||||
errors.error_id AS hydrated,
|
||||
COALESCE((SELECT TRUE
|
||||
FROM public.user_viewed_errors AS ve
|
||||
WHERE ve.error_id = %(error_id)s
|
||||
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||
FROM public.errors
|
||||
WHERE error_id = %(error_id)s""",
|
||||
{"userId": user_id, "error_id": error_id})
|
||||
cur.execute(
|
||||
query=query
|
||||
)
|
||||
r = cur.fetchone()
|
||||
if r:
|
||||
return r.get("viewed")
|
||||
return True
|
||||
|
||||
|
||||
def viewed_error(project_id, user_id, error_id):
|
||||
if viewed_error_exists(user_id=user_id, error_id=error_id):
|
||||
return None
|
||||
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||
|
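Mark-as-viewed is idempotent, and the guard is deliberately biased: viewed_error_exists returns True when the error row itself is missing, so unknown errors are skipped rather than inserted. Illustrative:

viewed_error(project_id=1, user_id=7, error_id="e-123")   # first call inserts
viewed_error(project_id=1, user_id=7, error_id="e-123")   # no-op, returns None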
|
@@ -1,10 +1,9 @@
|
|||
from functools import cache
|
||||
from typing import Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import autocomplete
|
||||
from chalicelib.core import issues
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.core.sessions import sessions_metas
|
||||
from chalicelib.core import sessions_metas
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
|
||||
|
|
@@ -56,7 +55,7 @@ def __get_grouped_clickrage(rows, session_id, project_id):
|
|||
def get_by_session_id(session_id, project_id, group_clickrage=False, event_type: Optional[schemas.EventType] = None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
rows = []
|
||||
if event_type is None or event_type == schemas.EventType.CLICK:
|
||||
if event_type is None or event_type == schemas.EventType.click:
|
||||
cur.execute(cur.mogrify("""\
|
||||
SELECT
|
||||
c.*,
|
||||
|
|
@@ -70,7 +69,7 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
rows += cur.fetchall()
|
||||
if group_clickrage:
|
||||
rows = __get_grouped_clickrage(rows=rows, session_id=session_id, project_id=project_id)
|
||||
if event_type is None or event_type == schemas.EventType.INPUT:
|
||||
if event_type is None or event_type == schemas.EventType.input:
|
||||
cur.execute(cur.mogrify("""
|
||||
SELECT
|
||||
i.*,
|
||||
|
|
@@ -82,7 +81,7 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
if event_type is None or event_type == schemas.EventType.LOCATION:
|
||||
if event_type is None or event_type == schemas.EventType.location:
|
||||
cur.execute(cur.mogrify("""\
|
||||
SELECT
|
||||
l.*,
|
||||
|
|
@@ -99,96 +98,70 @@ def get_by_session_id(session_id, project_id, group_clickrage=False, event_type:
|
|||
return rows
|
||||
|
||||
|
||||
def _search_tags(project_id, value, key=None, source=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = f"""
|
||||
SELECT public.tags.name,
|
||||
'TAG' AS type
|
||||
FROM public.tags
|
||||
WHERE public.tags.project_id = %(project_id)s
|
||||
ORDER BY SIMILARITY(public.tags.name, %(value)s) DESC
|
||||
LIMIT 10
|
||||
"""
|
||||
query = cur.mogrify(query, {'project_id': project_id, 'value': value})
|
||||
cur.execute(query)
|
||||
results = helper.list_to_camel_case(cur.fetchall())
|
||||
return results
|
||||
|
||||
|
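_search_tags ranks candidates by trigram similarity, which assumes the pg_trgm extension provides SIMILARITY(). Usage sketch (values illustrative):

_search_tags(project_id=1, value="check")
# -> up to 10 rows such as [{"name": "checkout-button", "type": "TAG"}, ...],
#    ordered best trigram match first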
||||
class EventType:
|
||||
CLICK = Event(ui_type=schemas.EventType.CLICK, table="events.clicks", column="label")
|
||||
INPUT = Event(ui_type=schemas.EventType.INPUT, table="events.inputs", column="label")
|
||||
LOCATION = Event(ui_type=schemas.EventType.LOCATION, table="events.pages", column="path")
|
||||
CUSTOM = Event(ui_type=schemas.EventType.CUSTOM, table="events_common.customs", column="name")
|
||||
REQUEST = Event(ui_type=schemas.EventType.REQUEST, table="events_common.requests", column="path")
|
||||
GRAPHQL = Event(ui_type=schemas.EventType.GRAPHQL, table="events.graphql", column="name")
|
||||
STATEACTION = Event(ui_type=schemas.EventType.STATE_ACTION, table="events.state_actions", column="name")
|
||||
TAG = Event(ui_type=schemas.EventType.TAG, table="events.tags", column="tag_id")
|
||||
ERROR = Event(ui_type=schemas.EventType.ERROR, table="events.errors",
|
||||
CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label")
|
||||
INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label")
|
||||
LOCATION = Event(ui_type=schemas.EventType.location, table="events.pages", column="path")
|
||||
CUSTOM = Event(ui_type=schemas.EventType.custom, table="events_common.customs", column="name")
|
||||
REQUEST = Event(ui_type=schemas.EventType.request, table="events_common.requests", column="path")
|
||||
GRAPHQL = Event(ui_type=schemas.EventType.graphql, table="events.graphql", column="name")
|
||||
STATEACTION = Event(ui_type=schemas.EventType.state_action, table="events.state_actions", column="name")
|
||||
ERROR = Event(ui_type=schemas.EventType.error, table="events.errors",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
METADATA = Event(ui_type=schemas.FilterType.METADATA, table="public.sessions", column=None)
|
||||
# MOBILE
|
||||
CLICK_MOBILE = Event(ui_type=schemas.EventType.CLICK_MOBILE, table="events_ios.taps", column="label")
|
||||
INPUT_MOBILE = Event(ui_type=schemas.EventType.INPUT_MOBILE, table="events_ios.inputs", column="label")
|
||||
VIEW_MOBILE = Event(ui_type=schemas.EventType.VIEW_MOBILE, table="events_ios.views", column="name")
|
||||
SWIPE_MOBILE = Event(ui_type=schemas.EventType.SWIPE_MOBILE, table="events_ios.swipes", column="label")
|
||||
CUSTOM_MOBILE = Event(ui_type=schemas.EventType.CUSTOM_MOBILE, table="events_common.customs", column="name")
|
||||
REQUEST_MOBILE = Event(ui_type=schemas.EventType.REQUEST_MOBILE, table="events_common.requests", column="path")
|
||||
CRASH_MOBILE = Event(ui_type=schemas.EventType.ERROR_MOBILE, table="events_common.crashes",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
METADATA = Event(ui_type=schemas.FilterType.metadata, table="public.sessions", column=None)
|
||||
# IOS
|
||||
CLICK_IOS = Event(ui_type=schemas.EventType.click_ios, table="events_ios.taps", column="label")
|
||||
INPUT_IOS = Event(ui_type=schemas.EventType.input_ios, table="events_ios.inputs", column="label")
|
||||
VIEW_IOS = Event(ui_type=schemas.EventType.view_ios, table="events_ios.views", column="name")
|
||||
SWIPE_IOS = Event(ui_type=schemas.EventType.swipe_ios, table="events_ios.swipes", column="label")
|
||||
CUSTOM_IOS = Event(ui_type=schemas.EventType.custom_ios, table="events_common.customs", column="name")
|
||||
REQUEST_IOS = Event(ui_type=schemas.EventType.request_ios, table="events_common.requests", column="path")
|
||||
CRASH_IOS = Event(ui_type=schemas.EventType.error_ios, table="events_common.crashes",
|
||||
column=None) # column=None because errors are searched by name or message
|
||||
|
||||
|
||||
@cache
|
||||
def supported_types():
|
||||
return {
|
||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
||||
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
||||
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
||||
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
||||
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.LOCATION.ui_type)),
|
||||
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
||||
SUPPORTED_TYPES = {
|
||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
||||
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
||||
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
||||
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
||||
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.LOCATION.ui_type)),
|
||||
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
||||
query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
|
||||
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST.ui_type)),
|
||||
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.GRAPHQL.ui_type)),
|
||||
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.STATEACTION.ui_type)),
|
||||
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
||||
query=None),
|
||||
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
||||
query=None),
|
||||
# IOS
|
||||
EventType.CLICK_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_IOS),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CLICK_IOS.ui_type)),
|
||||
EventType.INPUT_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_IOS),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.INPUT_IOS.ui_type)),
|
||||
EventType.VIEW_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_IOS),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.VIEW_IOS.ui_type)),
|
||||
EventType.CUSTOM_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_IOS),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CUSTOM.ui_type)),
|
||||
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
||||
typename=EventType.CUSTOM_IOS.ui_type)),
|
||||
EventType.REQUEST_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_IOS),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST.ui_type)),
|
||||
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.GRAPHQL.ui_type)),
|
||||
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.STATEACTION.ui_type)),
|
||||
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
|
||||
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
||||
typename=EventType.REQUEST_IOS.ui_type)),
|
||||
EventType.CRASH_IOS.ui_type: SupportedFilter(get=autocomplete.__search_errors_ios,
|
||||
query=None),
|
||||
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
||||
query=None),
|
||||
# MOBILE
|
||||
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CLICK_MOBILE.ui_type)),
|
||||
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.SWIPE_MOBILE.ui_type)),
|
||||
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.INPUT_MOBILE.ui_type)),
|
||||
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.VIEW_MOBILE.ui_type)),
|
||||
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CUSTOM_MOBILE.ui_type)),
|
||||
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST_MOBILE.ui_type)),
|
||||
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
|
||||
query=None),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_errors_by_session_id(session_id, project_id):
|
||||
|
|
@@ -208,17 +181,17 @@ def search(text, event_type, project_id, source, key):
|
|||
if not event_type:
|
||||
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
|
||||
|
||||
if event_type in supported_types().keys():
|
||||
rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type + "_MOBILE" in supported_types().keys():
|
||||
rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type in sessions_metas.supported_types().keys():
|
||||
if event_type in SUPPORTED_TYPES.keys():
|
||||
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
||||
# for IOS events autocomplete
|
||||
# if event_type + "_IOS" in SUPPORTED_TYPES.keys():
|
||||
# rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,source=source)
|
||||
elif event_type + "_IOS" in SUPPORTED_TYPES.keys():
|
||||
rows = SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
elif event_type.endswith("_IOS") \
|
||||
and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
elif event_type.endswith("_MOBILE") \
|
||||
and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
|
||||
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
else:
|
||||
return {"errors": ["unsupported event"]}
|
||||
|
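The search dispatcher above resolves an autocomplete type in a fixed order: exact event type, then a platform-suffixed variant, then session metas (with or without the suffix). The same cascade in isolation (container names and the suffix parameter are illustrative; the diff shows both _IOS and _MOBILE spellings):

def resolve(event_type, supported, metas, suffix="_MOBILE"):
    if event_type in supported:
        return supported[event_type]
    if event_type + suffix in supported:
        return supported[event_type + suffix]
    if event_type in metas:
        return metas[event_type]
    if event_type.endswith(suffix) and event_type[:-len(suffix)] in metas:
        return metas[event_type[:-len(suffix)]]
    return None   # -> {"errors": ["unsupported event"]}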
|
|
|||
68
api/chalicelib/core/events_ios.py
Normal file
|
|
@@ -0,0 +1,68 @@
|
|||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core import events
|
||||
|
||||
|
||||
def get_customs_by_session_id(session_id, project_id):
|
||||
return events.get_customs_by_session_id(session_id=session_id, project_id=project_id)
|
||||
|
||||
|
||||
def get_by_sessionId(session_id, project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
c.*,
|
||||
'TAP' AS type
|
||||
FROM events_ios.taps AS c
|
||||
WHERE
|
||||
c.session_id = %(session_id)s
|
||||
ORDER BY c.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
i.*,
|
||||
'INPUT' AS type
|
||||
FROM events_ios.inputs AS i
|
||||
WHERE
|
||||
i.session_id = %(session_id)s
|
||||
ORDER BY i.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
v.*,
|
||||
'VIEW' AS type
|
||||
FROM events_ios.views AS v
|
||||
WHERE
|
||||
v.session_id = %(session_id)s
|
||||
ORDER BY v.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
s.*,
|
||||
'SWIPE' AS type
|
||||
FROM events_ios.swipes AS s
|
||||
WHERE
|
||||
s.session_id = %(session_id)s
|
||||
ORDER BY s.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
rows = sorted(rows, key=lambda k: k["timestamp"])
|
||||
return rows
|
||||
|
||||
|
||||
def get_crashes_by_session_id(session_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT cr.*,uc.*, cr.timestamp - s.start_ts AS time
|
||||
FROM {events.EventType.CRASH_IOS.table} AS cr
|
||||
INNER JOIN public.crashes_ios AS uc USING (crash_ios_id)
|
||||
INNER JOIN public.sessions AS s USING (session_id)
|
||||
WHERE
|
||||
cr.session_id = %(session_id)s
|
||||
ORDER BY timestamp;""", {"session_id": session_id}))
|
||||
errors = cur.fetchall()
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
|
@@ -1,68 +0,0 @@
|
|||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core import events
|
||||
|
||||
|
||||
def get_customs_by_session_id(session_id, project_id):
|
||||
return events.get_customs_by_session_id(session_id=session_id, project_id=project_id)
|
||||
|
||||
|
||||
def get_by_sessionId(session_id, project_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
c.*,
|
||||
'TAP' AS type
|
||||
FROM events_ios.taps AS c
|
||||
WHERE
|
||||
c.session_id = %(session_id)s
|
||||
ORDER BY c.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
i.*,
|
||||
'INPUT' AS type
|
||||
FROM events_ios.inputs AS i
|
||||
WHERE
|
||||
i.session_id = %(session_id)s
|
||||
ORDER BY i.timestamp;""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
)
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
v.*,
|
||||
'VIEW' AS type
|
||||
FROM events_ios.views AS v
|
||||
WHERE
|
||||
v.session_id = %(session_id)s
|
||||
ORDER BY v.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT
|
||||
s.*,
|
||||
'SWIPE' AS type
|
||||
FROM events_ios.swipes AS s
|
||||
WHERE
|
||||
s.session_id = %(session_id)s
|
||||
ORDER BY s.timestamp;""", {"project_id": project_id, "session_id": session_id}))
|
||||
rows += cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
rows = sorted(rows, key=lambda k: k["timestamp"])
|
||||
return rows
|
||||
|
||||
|
||||
def get_crashes_by_session_id(session_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""
|
||||
SELECT cr.*,uc.*, cr.timestamp - s.start_ts AS time
|
||||
FROM {events.EventType.CRASH_MOBILE.table} AS cr
|
||||
INNER JOIN public.crashes_ios AS uc USING (crash_ios_id)
|
||||
INNER JOIN public.sessions AS s USING (session_id)
|
||||
WHERE
|
||||
cr.session_id = %(session_id)s
|
||||
ORDER BY timestamp;""", {"session_id": session_id}))
|
||||
errors = cur.fetchall()
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
|
@@ -1,14 +1,11 @@
|
|||
import json
|
||||
import logging
|
||||
from typing import Any, List, Dict, Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from typing import Any, List, Dict, Optional
|
||||
from fastapi import HTTPException, status
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
import json
|
||||
import logging
|
||||
|
||||
feature_flag_columns = (
|
||||
"feature_flag_id",
|
||||
|
|
@@ -51,7 +48,7 @@ def update_feature_flag_status(project_id: int, feature_flag_id: int, is_active:
|
|||
|
||||
return {"is_active": cur.fetchone()["is_active"]}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update feature flag status: {e}")
|
||||
logging.error(f"Failed to update feature flag status: {e}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Failed to update feature flag status")
|
||||
|
||||
|
|
@@ -109,12 +106,12 @@ def prepare_constraints_params_to_search(data, project_id, user_id):
|
|||
if data.query is not None and len(data.query) > 0:
|
||||
constraints.append("flag_key ILIKE %(query)s")
|
||||
params["query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
op=schemas.SearchEventOperator._contains)
|
||||
return constraints, params
|
||||
|
||||
|
||||
def create_feature_flag(project_id: int, user_id: int, feature_flag_data: schemas.FeatureFlagSchema) -> Optional[int]:
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.MULTI_VARIANT and len(feature_flag_data.variants) == 0:
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.multi_variant and len(feature_flag_data.variants) == 0:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Variants are required for multi variant flag")
|
||||
|
||||
|
|
@@ -196,7 +193,7 @@ def validate_unique_flag_key(feature_flag_data, project_id, exclude_id=None):
|
|||
|
||||
|
||||
def validate_multi_variant_flag(feature_flag_data):
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.MULTI_VARIANT:
|
||||
if feature_flag_data.flag_type == schemas.FeatureFlagType.multi_variant:
|
||||
if sum([v.rollout_percentage for v in feature_flag_data.variants]) > 100:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Sum of rollout percentage for variants cannot be greater than 100.")
|
||||
|
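validate_multi_variant_flag enforces that variant rollouts never over-allocate traffic. The arithmetic being checked (values illustrative):

variants_ok  = [30, 50, 20]   # sums to 100 -> accepted
variants_bad = [60, 60]       # sums to 120 -> HTTP 400
assert sum(variants_ok) <= 100 and sum(variants_bad) > 100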
|
@@ -302,8 +299,7 @@ def create_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlag
|
|||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = [
|
||||
(feature_flag_id, c.name, c.rollout_percentage,
|
||||
json.dumps([filter_.model_dump() for filter_ in c.filters]))
|
||||
(feature_flag_id, c.name, c.rollout_percentage, json.dumps([filter_.model_dump() for filter_ in c.filters]))
|
||||
for c in conditions]
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
|
|
@@ -459,8 +455,7 @@ def create_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVari
|
|||
"""
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in
|
||||
variants]
|
||||
params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in variants]
|
||||
query = cur.mogrify(sql, params)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
|
|
|
|||
68
api/chalicelib/core/funnels.py
Normal file
|
|
@@ -0,0 +1,68 @@
|
|||
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import significance
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
||||
|
||||
def filter_stages(stages: List[schemas.SessionSearchEventSchema2]):
|
||||
ALLOW_TYPES = [schemas.EventType.click, schemas.EventType.input,
|
||||
schemas.EventType.location, schemas.EventType.custom,
|
||||
schemas.EventType.click_ios, schemas.EventType.input_ios,
|
||||
schemas.EventType.view_ios, schemas.EventType.custom_ios, ]
|
||||
return [s for s in stages if s.type in ALLOW_TYPES and s.value is not None]
|
||||
|
||||
|
||||
def __parse_events(f_events: List[dict]):
|
||||
return [schemas.SessionSearchEventSchema2.parse_obj(e) for e in f_events]
|
||||
|
||||
|
||||
def __fix_stages(f_events: List[schemas.SessionSearchEventSchema2]):
|
||||
if f_events is None:
|
||||
return
|
||||
events = []
|
||||
for e in f_events:
|
||||
if e.operator is None:
|
||||
e.operator = schemas.SearchEventOperator._is
|
||||
|
||||
if not isinstance(e.value, list):
|
||||
e.value = [e.value]
|
||||
is_any = sh.isAny_opreator(e.operator)
|
||||
if not is_any and isinstance(e.value, list) and len(e.value) == 0:
|
||||
continue
|
||||
events.append(e)
|
||||
return events
|
||||
|
||||
|
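__fix_stages normalizes each funnel stage: the operator defaults to "is", scalar values are wrapped in lists, and empty non-"any" stages are dropped. A standalone dict-based restatement of the same rules (the "isAny" spelling is an assumption here; the real check is sh.isAny_opreator):

def normalize(stage: dict):
    stage.setdefault("operator", "is")
    if not isinstance(stage["value"], list):
        stage["value"] = [stage["value"]]
    if stage["operator"] != "isAny" and len(stage["value"]) == 0:
        return None   # dropped, like the `continue` above
    return stage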
||||
# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
|
||||
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
|
||||
data.events = filter_stages(__parse_events(data.events))
|
||||
data.events = __fix_stages(data.events)
|
||||
if len(data.events) == 0:
|
||||
return {"stages": [], "totalDropDueToIssues": 0}
|
||||
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id)
|
||||
insights = helper.list_to_camel_case(insights)
|
||||
if len(insights) > 0:
|
||||
# TODO: check if this correct
|
||||
if total_drop_due_to_issues > insights[0]["sessionsCount"]:
|
||||
if len(insights) == 0:
|
||||
total_drop_due_to_issues = 0
|
||||
else:
|
||||
total_drop_due_to_issues = insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]
|
||||
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
|
||||
return {"stages": insights,
|
||||
"totalDropDueToIssues": total_drop_due_to_issues}
|
||||
|
||||
|
||||
# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
|
||||
def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
|
||||
data.events = filter_stages(data.events)
|
||||
data.events = __fix_stages(data.events)
|
||||
if len(data.events) == 0:
|
||||
return {"issues": []}
|
||||
|
||||
return {
|
||||
"issues": helper.dict_to_camel_case(
|
||||
significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=1,
|
||||
last_stage=len(data.events)))}
|
||||
|
|
@@ -1,4 +1,4 @@
|
|||
import logging
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import redis
|
||||
import requests
|
||||
|
|
@@ -7,13 +7,11 @@ from decouple import config
|
|||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def app_connection_string(name, port, path):
|
||||
namespace = config("POD_NAMESPACE", default="app")
|
||||
conn_string = config("CLUSTER_URL", default="svc.cluster.local")
|
||||
return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"
|
||||
return f"http://{'.'.join(filter(None,[name,namespace,conn_string]))}:{port}/{path}"
|
||||
|
||||
|
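The rewritten app_connection_string drops empty address components instead of emitting stray dots, which matters when POD_NAMESPACE or CLUSTER_URL is blank. The join in isolation:

name, namespace, cluster = "alerts-openreplay", "", ""
'.'.join(filter(None, [name, namespace, cluster]))
# -> "alerts-openreplay"  (the old f-string form would yield "alerts-openreplay..")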
||||
HEALTH_ENDPOINTS = {
|
||||
|
|
@@ -27,6 +25,7 @@ HEALTH_ENDPOINTS = {
|
|||
"http": app_connection_string("http-openreplay", 8888, "metrics"),
|
||||
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
|
||||
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
|
||||
"peers": app_connection_string("peers-openreplay", 8888, "health"),
|
||||
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
|
||||
"sourcemapreader": app_connection_string(
|
||||
"sourcemapreader-openreplay", 8888, "health"
|
||||
|
|
@@ -38,66 +37,78 @@ HEALTH_ENDPOINTS = {
|
|||
def __check_database_pg(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["Postgres health-check failed"]},
|
||||
"details": {
|
||||
"errors": ["Postgres health-check failed"]
|
||||
}
|
||||
}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
try:
|
||||
cur.execute("SHOW server_version;")
|
||||
# server_version = cur.fetchone()
|
||||
server_version = cur.fetchone()
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: postgres not responding")
|
||||
logger.exception(e)
|
||||
print("!! health failed: postgres not responding")
|
||||
print(str(e))
|
||||
return fail_response
|
||||
try:
|
||||
cur.execute("SELECT openreplay_version() AS version;")
|
||||
# schema_version = cur.fetchone()
|
||||
schema_version = cur.fetchone()
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: openreplay_version not defined")
|
||||
logger.exception(e)
|
||||
print("!! health failed: openreplay_version not defined")
|
||||
print(str(e))
|
||||
return fail_response
|
||||
return {
|
||||
"health": True,
|
||||
"details": {
|
||||
# "version": server_version["server_version"],
|
||||
# "schema": schema_version["version"]
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def __not_supported(*_):
|
||||
return {"errors": ["not supported"]}
|
||||
|
||||
|
||||
def __always_healthy(*_):
|
||||
return {"health": True, "details": {}}
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
|
||||
|
||||
def __check_be_service(service_name):
|
||||
def fn(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
"details": {
|
||||
"errors": ["server health-check failed"]
|
||||
}
|
||||
}
|
||||
try:
|
||||
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
|
||||
if results.status_code != 200:
|
||||
logger.error(
|
||||
f"!! issue with the {service_name}-health code:{results.status_code}"
|
||||
)
|
||||
logger.error(results.text)
|
||||
print(f"!! issue with the {service_name}-health code:{results.status_code}")
|
||||
print(results.text)
|
||||
# fail_response["details"]["errors"].append(results.text)
|
||||
return fail_response
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error(f"!! Timeout getting {service_name}-health")
|
||||
print(f"!! Timeout getting {service_name}-health")
|
||||
# fail_response["details"]["errors"].append("timeout")
|
||||
return fail_response
|
||||
except Exception as e:
|
||||
logger.error(f"!! Issue getting {service_name}-health response")
|
||||
logger.exception(e)
|
||||
print(f"!! Issue getting {service_name}-health response")
|
||||
print(str(e))
|
||||
try:
|
||||
logger.error(results.text)
|
||||
print(results.text)
|
||||
# fail_response["details"]["errors"].append(results.text)
|
||||
except Exception:
|
||||
logger.error("couldn't get response")
|
||||
print("couldn't get response")
|
||||
# fail_response["details"]["errors"].append(str(e))
|
||||
return fail_response
|
||||
return {"health": True, "details": {}}
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
|
||||
return fn
|
||||
|
||||
|
|
@@ -105,18 +116,18 @@ def __check_be_service(service_name):
|
|||
def __check_redis(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
"details": {"errors": ["server health-check failed"]}
|
||||
}
|
||||
if config("REDIS_STRING", default=None) is None:
|
||||
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
|
||||
return fail_response
|
||||
|
||||
try:
|
||||
r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
|
||||
r = redis.from_url(config("REDIS_STRING"))
|
||||
r.ping()
|
||||
except Exception as e:
|
||||
logger.error("!! Issue getting redis-health response")
|
||||
logger.exception(e)
|
||||
print("!! Issue getting redis-health response")
|
||||
print(str(e))
|
||||
# fail_response["details"]["errors"].append(str(e))
|
||||
return fail_response
|
||||
|
||||
|
|
@@ -124,43 +135,53 @@ def __check_redis(*_):
|
|||
"health": True,
|
||||
"details": {
|
||||
# "version": r.execute_command('INFO')['redis_version']
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def __check_SSL(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["SSL Certificate health-check failed"]},
|
||||
"details": {
|
||||
"errors": ["SSL Certificate health-check failed"]
|
||||
}
|
||||
}
|
||||
try:
|
||||
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
|
||||
except Exception as e:
|
||||
logger.error("!! health failed: SSL Certificate")
|
||||
logger.exception(e)
|
||||
print("!! health failed: SSL Certificate")
|
||||
print(str(e))
|
||||
return fail_response
|
||||
return {"health": True, "details": {}}
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
|
||||
|
||||
def __get_sessions_stats(*_):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
constraints = ["projects.deleted_at IS NULL"]
|
||||
query = cur.mogrify(
|
||||
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
|
||||
query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
|
||||
COALESCE(SUM(events_count),0) AS e_c
|
||||
FROM public.projects_stats
|
||||
INNER JOIN public.projects USING(project_id)
|
||||
WHERE {" AND ".join(constraints)};"""
|
||||
)
|
||||
WHERE {" AND ".join(constraints)};""")
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
|
||||
return {
|
||||
"numberOfSessionsCaptured": row["s_c"],
|
||||
"numberOfEventCaptured": row["e_c"]
|
||||
}
|
||||
|
||||
|
||||
def get_health(tenant_id=None):
|
||||
def get_health():
|
||||
health_map = {
|
||||
"databases": {"postgres": __check_database_pg},
|
||||
"ingestionPipeline": {"redis": __check_redis},
|
||||
"databases": {
|
||||
"postgres": __check_database_pg
|
||||
},
|
||||
"ingestionPipeline": {
|
||||
"redis": __check_redis
|
||||
},
|
||||
"backendServices": {
|
||||
"alerts": __check_be_service("alerts"),
|
||||
"assets": __check_be_service("assets"),
|
||||
|
|
@@ -173,12 +194,13 @@ def get_health(tenant_id=None):
            "http": __check_be_service("http"),
            "ingress-nginx": __always_healthy,
            "integrations": __check_be_service("integrations"),
            "peers": __check_be_service("peers"),
            "sink": __check_be_service("sink"),
            "sourcemapreader": __check_be_service("sourcemapreader"),
            "storage": __check_be_service("storage"),
            "storage": __check_be_service("storage")
        },
        "details": __get_sessions_stats,
        "ssl": __check_SSL,
        "ssl": __check_SSL
    }
    return __process_health(health_map=health_map)
@@ -190,16 +212,10 @@ def __process_health(health_map):
            response.pop(parent_key)
        elif isinstance(health_map[parent_key], dict):
            for element_key in health_map[parent_key]:
                if config(
                        f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
                        cast=bool,
                        default=False,
                ):
                if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
                    response[parent_key].pop(element_key)
                else:
                    response[parent_key][element_key] = health_map[parent_key][
                        element_key
                    ]()
                    response[parent_key][element_key] = health_map[parent_key][element_key]()
        else:
            response[parent_key] = health_map[parent_key]()
    return response
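Aside on the hunk above: `__process_health` walks a two-level map of callables and drops any check whose `SKIP_H_<PARENT>_<ELEMENT>` env var is truthy. A minimal standalone sketch of that convention, simplified to build the response up rather than pop keys out; `os.environ` stands in for decouple's `config`, and all names below are illustrative:

import os

def process_health_sketch(health_map):
    # health_map: {"databases": {"postgres": callable}, "ssl": callable, ...}
    response = {}
    for parent_key, checks in health_map.items():
        if isinstance(checks, dict):
            response[parent_key] = {}
            for element_key, check in checks.items():
                # e.g. SKIP_H_BACKENDSERVICES_ASSETS=true hides a single check
                if os.environ.get(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", "false") == "true":
                    continue
                response[parent_key][element_key] = check()
        else:
            response[parent_key] = checks()
    return response

# process_health_sketch({"ssl": lambda: {"health": True, "details": {}}})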
@@ -207,8 +223,7 @@ def __process_health(health_map):

def cron():
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT projects.project_id,
        query = cur.mogrify("""SELECT projects.project_id,
                      projects.created_at,
                      projects.sessions_last_check_at,
                      projects.first_recorded_session_at,
@@ -216,8 +231,7 @@ def cron():
               FROM public.projects
                        LEFT JOIN public.projects_stats USING (project_id)
               WHERE projects.deleted_at IS NULL
               ORDER BY project_id;"""
        )
               ORDER BY project_id;""")
        cur.execute(query)
        rows = cur.fetchall()
        for r in rows:
@@ -238,24 +252,20 @@ def cron():
                count_start_from = r["last_update_at"]

            count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
            params = {
                "project_id": r["project_id"],
                "start_ts": count_start_from,
                "end_ts": TimeUTC.now(),
                "sessions_count": 0,
                "events_count": 0,
            }
            params = {"project_id": r["project_id"],
                      "start_ts": count_start_from,
                      "end_ts": TimeUTC.now(),
                      "sessions_count": 0,
                      "events_count": 0}

            query = cur.mogrify(
                """SELECT COUNT(1) AS sessions_count,
            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                                          COALESCE(SUM(events_count),0) AS events_count
                                   FROM public.sessions
                                   WHERE project_id=%(project_id)s
                                     AND start_ts>=%(start_ts)s
                                     AND start_ts<=%(end_ts)s
                                     AND duration IS NOT NULL;""",
                params,
            )
                                params)
            cur.execute(query)
            row = cur.fetchone()
            if row is not None:
@@ -263,68 +273,56 @@ def cron():
                params["events_count"] = row["events_count"]

            if insert:
                query = cur.mogrify(
                    """INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                                       VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
                    params,
                )
                                    params)
            else:
                query = cur.mogrify(
                    """UPDATE public.projects_stats
                query = cur.mogrify("""UPDATE public.projects_stats
                                       SET sessions_count=sessions_count+%(sessions_count)s,
                                           events_count=events_count+%(events_count)s,
                                           last_update_at=(now() AT TIME ZONE 'utc'::text)
                                       WHERE project_id=%(project_id)s;""",
                    params,
                )
                                    params)
            cur.execute(query)


# this cron is used to correct the sessions&events count every week
def weekly_cron():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = cur.mogrify(
            """SELECT project_id,
        query = cur.mogrify("""SELECT project_id,
                      projects_stats.last_update_at
               FROM public.projects
                        LEFT JOIN public.projects_stats USING (project_id)
               WHERE projects.deleted_at IS NULL
               ORDER BY project_id;"""
        )
               ORDER BY project_id;""")
        cur.execute(query)
        rows = cur.fetchall()
        for r in rows:
            if r["last_update_at"] is None:
                continue

            params = {
                "project_id": r["project_id"],
                "end_ts": TimeUTC.now(),
                "sessions_count": 0,
                "events_count": 0,
            }
            params = {"project_id": r["project_id"],
                      "end_ts": TimeUTC.now(),
                      "sessions_count": 0,
                      "events_count": 0}

            query = cur.mogrify(
                """SELECT COUNT(1) AS sessions_count,
            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                                          COALESCE(SUM(events_count),0) AS events_count
                                   FROM public.sessions
                                   WHERE project_id=%(project_id)s
                                     AND start_ts<=%(end_ts)s
                                     AND duration IS NOT NULL;""",
                params,
            )
                                params)
            cur.execute(query)
            row = cur.fetchone()
            if row is not None:
                params["sessions_count"] = row["sessions_count"]
                params["events_count"] = row["events_count"]

                query = cur.mogrify(
                    """UPDATE public.projects_stats
                query = cur.mogrify("""UPDATE public.projects_stats
                                       SET sessions_count=%(sessions_count)s,
                                           events_count=%(events_count)s,
                                           last_update_at=(now() AT TIME ZONE 'utc'::text)
                                       WHERE project_id=%(project_id)s;""",
                    params,
                )
                                    params)
            cur.execute(query)
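Aside: `cron` counts only sessions newer than `last_update_at` and then branches into an INSERT or an incremental UPDATE, while `weekly_cron` recounts from scratch to correct drift. Assuming `projects_stats(project_id)` carries a unique constraint (which this diff does not show), the branch could in principle collapse into one upsert; a hedged sketch:

# Hypothetical single-statement alternative to the insert/update branch above.
# Requires a unique index on projects_stats(project_id), not shown in this diff.
UPSERT_STATS = """\
INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text))
ON CONFLICT (project_id) DO UPDATE
    SET sessions_count = projects_stats.sessions_count + EXCLUDED.sessions_count,
        events_count   = projects_stats.events_count + EXCLUDED.events_count,
        last_update_at = EXCLUDED.last_update_at;"""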
74 api/chalicelib/core/heatmaps.py (new file)
@@ -0,0 +1,74 @@
from chalicelib.utils import sql_helper as sh
import schemas
from chalicelib.utils import helper, pg_client


def get_by_url(project_id, data: schemas.GetHeatmapPayloadSchema):
    args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
            "project_id": project_id, "url": data.url}
    constraints = ["sessions.project_id = %(project_id)s",
                   "(url = %(url)s OR path= %(url)s)",
                   "clicks.timestamp >= %(startDate)s",
                   "clicks.timestamp <= %(endDate)s",
                   "start_ts >= %(startDate)s",
                   "start_ts <= %(endDate)s",
                   "duration IS NOT NULL"]
    query_from = "events.clicks INNER JOIN sessions USING (session_id)"
    q_count = "count(1) AS count"
    has_click_rage_filter = False
    if len(data.filters) > 0:
        for i, f in enumerate(data.filters):
            if f.type == schemas.FilterType.issue and len(f.value) > 0:
                has_click_rage_filter = True
                q_count = "max(real_count) AS count,TRUE AS click_rage"
                query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
                                 INNER JOIN issues AS mis USING (issue_id)
                                 INNER JOIN LATERAL (
                                     SELECT COUNT(1) AS real_count
                                     FROM events.clicks AS sc
                                              INNER JOIN sessions as ss USING (session_id)
                                     WHERE ss.project_id = 2
                                       AND (sc.url = %(url)s OR sc.path = %(url)s)
                                       AND sc.timestamp >= %(startDate)s
                                       AND sc.timestamp <= %(endDate)s
                                       AND ss.start_ts >= %(startDate)s
                                       AND ss.start_ts <= %(endDate)s
                                       AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
                constraints += ["mis.project_id = %(project_id)s",
                                "issues.timestamp >= %(startDate)s",
                                "issues.timestamp <= %(endDate)s"]
                f_k = f"issue_value{i}"
                args = {**args, **sh.multi_values(f.value, value_key=f_k)}
                constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
                                                       f.value, value_key=f_k))
                constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
                                                       f.value, value_key=f_k))

    if data.click_rage and not has_click_rage_filter:
        constraints.append("""(issues.session_id IS NULL
                               OR (issues.timestamp >= %(startDate)s
                                   AND issues.timestamp <= %(endDate)s
                                   AND mis.project_id = %(project_id)s))""")
        q_count += ",COALESCE(bool_or(mis.type = 'click_rage'), FALSE) AS click_rage"
        query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
                         LEFT JOIN issues AS mis USING (issue_id)"""
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT selector, {q_count}
                                FROM {query_from}
                                WHERE {" AND ".join(constraints)}
                                GROUP BY selector
                                LIMIT 500;""", args)
        # print("---------")
        # print(query.decode('UTF-8'))
        # print("---------")
        try:
            cur.execute(query)
        except Exception as err:
            print("--------- HEATMAP SEARCH QUERY EXCEPTION -----------")
            print(query.decode('UTF-8'))
            print("--------- PAYLOAD -----------")
            print(data)
            print("--------------------")
            raise err
        rows = cur.fetchall()
    return helper.list_to_camel_case(rows)
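Two notes on `get_by_url` above: the LATERAL subquery hardcodes `ss.project_id = 2` where `%(project_id)s` was presumably intended, and the function returns per-selector click counts. A hedged usage sketch — the payload field spelling mirrors the attributes read above, the actual schema definition lives elsewhere in the repo, and all values are made up:

import schemas
from chalicelib.core import heatmaps

payload = schemas.GetHeatmapPayloadSchema(
    url="https://app.example.com/checkout",  # illustrative URL
    startTimestamp=1700000000000,            # ms epoch, per the timestamp comparisons above
    endTimestamp=1700086400000,
    filters=[],
    click_rage=True,
)
rows = heatmaps.get_by_url(project_id=1, data=payload)
# Expected shape, inferred from the SELECT:
# [{"selector": ..., "count": ..., "clickRage": ...}, ...]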
86 api/chalicelib/core/integration_github.py (new file)
@@ -0,0 +1,86 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.github


class GitHubIntegration(integration_base.BaseIntegration):

    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        super(GitHubIntegration, self).__init__(user_id=user_id, ISSUE_CLASS=GithubIntegrationIssue)

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        return self._issue_handler

    def get_obfuscated(self):
        integration = self.get()
        if integration is None:
            return None
        return {"token": helper.obfuscate(text=integration["token"]), "provider": self.provider.lower()}

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.oauth_authentication
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    def _add(self, data):
        pass

    def add(self, token, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.oauth_authentication(user_id, provider, provider_user_id, token)
                            VALUES(%(user_id)s, 'github', '', %(token)s)
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             "token": token})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    # TODO: make a revoke token call
    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.oauth_authentication
                            WHERE user_id=%(user_id)s AND provider=%(provider)s;""",
                            {"user_id": self._user_id, "provider": self.provider.lower()})
            )
            return {"state": "success"}

    def add_edit(self, data: schemas.IssueTrackingGithubSchema):
        s = self.get()
        if s is not None:
            return self.update(
                changes={
                    "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
                        else s.token
                },
                obfuscate=True
            )
        else:
            return self.add(token=data.token, obfuscate=True)
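The `add_edit` guard above is the usual round-trip for masked secrets: `get_obfuscated` sends `***…` to the UI, and an edit that posts the mask back must keep the stored token (note: the fallback reads `s.token` off the dict returned by `get()`, where `s["token"]` was presumably intended, as the JIRA counterpart below does). Reduced to a standalone helper, illustrative only:

def resolve_token(submitted: str, stored: str) -> str:
    # A re-submitted form carries the obfuscated placeholder unless the user typed a new token.
    if len(submitted) > 0 and submitted.find("***") == -1:
        return submitted
    return stored

assert resolve_token("ghp_newtoken", "ghp_oldtoken") == "ghp_newtoken"
assert resolve_token("********oken", "ghp_oldtoken") == "ghp_oldtoken"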
100 api/chalicelib/core/integration_github_issue.py (new file)
@@ -0,0 +1,100 @@
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
from chalicelib.utils import github_client_v3
from chalicelib.utils.github_client_v3 import github_formatters as formatter


class GithubIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, integration_token):
        self.__client = github_client_v3.githubV3Request(integration_token)
        super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)

    def get_current_user(self):
        return formatter.user(self.__client.get("/user"))

    def get_meta(self, repoId):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{repoId}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)
        meta = {
            'users': users,
            'issueTypes': [formatter.label(l) for l in
                           self.__client.get(f"/repositories/{repoId}/labels")]
        }

        return meta

    def create_new_assignment(self, integration_project_id, title, description, assignee,
                              issue_type):
        repoId = integration_project_id
        assignees = [assignee]
        labels = [str(issue_type)]

        metas = self.get_meta(repoId)
        real_assignees = []
        for a in assignees:
            for u in metas["users"]:
                if a == str(u["id"]):
                    real_assignees.append(u["name"])
                    break
        real_labels = ["OpenReplay"]
        for l in labels:
            found = False
            for ll in metas["issueTypes"]:
                if l == str(ll["id"]):
                    real_labels.append(ll["name"])
                    found = True
                    break
            if not found:
                real_labels.append(l)
        issue = self.__client.post(f"/repositories/{repoId}/issues", body={"title": title, "body": description,
                                                                           "assignees": real_assignees,
                                                                           "labels": real_labels})
        return formatter.issue(issue)

    def get_by_ids(self, saved_issues):
        results = []
        for i in saved_issues:
            results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        repoId = integration_project_id
        issueNumber = assignment_id
        issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
        issue = formatter.issue(issue)
        if issue["commentsCount"] > 0:
            issue["comments"] = [formatter.comment(c) for c in
                                 self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
        return issue

    def comment(self, integration_project_id, assignment_id, comment):
        repoId = integration_project_id
        issueNumber = assignment_id
        commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                            body={"body": comment})
        return formatter.comment(commentCreated)

    def get_metas(self, integration_project_id):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)

        return {"provider": self.provider.lower(),
                'users': users,
                'issueTypes': [formatter.label(l) for l in
                               self.__client.get(f"/repositories/{integration_project_id}/labels")]
                }

    def get_projects(self):
        repos = self.__client.get("/user/repos")
        return [formatter.repo(r) for r in repos]
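Usage sketch for the issue handler above: `create_new_assignment` resolves the submitted user and label ids against `get_meta` before calling the GitHub API, and a label id that matches nothing falls through as a literal label name. All values below are made up:

handler = GithubIntegrationIssue(integration_token="ghp_example")  # hypothetical token
new_issue = handler.create_new_assignment(
    integration_project_id=123456789,  # numeric GitHub repository id (the /repositories/{id} routes above)
    title="Dead click on checkout button",
    description="Opened from an OpenReplay session",
    assignee="42",    # collaborator id, matched as a string against get_meta()["users"]
    issue_type="7",   # label id; if it matches nothing, "7" itself becomes a label
)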
132 api/chalicelib/core/integration_jira_cloud.py (new file)
@@ -0,0 +1,132 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.jira


def obfuscate_string(string):
    return "*" * (len(string) - 4) + string[-4:]


class JIRAIntegration(integration_base.BaseIntegration):
    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        # TODO: enable super-constructor when OAuth is done
        # super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
        self._issue_handler = None
        self._user_id = user_id
        self.integration = self.get()

        if self.integration is None:
            return
        self.integration["valid"] = True
        if not self.integration["url"].endswith('atlassian.net'):
            self.integration["valid"] = False

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        if self.integration["url"].endswith('atlassian.net') and self._issue_handler is None:
            try:
                self._issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
                                                                username=self.integration["username"],
                                                                url=self.integration["url"])
            except Exception as e:
                self._issue_handler = None
                self.integration["valid"] = False
        return self._issue_handler

    # TODO: remove this once jira-oauth is done
    def get(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify(
                    """SELECT username, token, url
                       FROM public.jira_cloud
                       WHERE user_id=%(user_id)s;""",
                    {"user_id": self._user_id})
            )
            data = helper.dict_to_camel_case(cur.fetchone())

        if data is None:
            return
        data["valid"] = True
        if not data["url"].endswith('atlassian.net'):
            data["valid"] = False
        return data

    def get_obfuscated(self):
        if self.integration is None:
            return None
        integration = dict(self.integration)
        integration["token"] = obfuscate_string(self.integration["token"])
        integration["provider"] = self.provider.lower()
        return integration

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.jira_cloud
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING username, token, url;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if obfuscate:
                w["token"] = obfuscate_string(w["token"])
            return self.get()

    # TODO: make this generic for all issue tracking integrations
    def _add(self, data):
        print("a pretty defined abstract method")
        return

    def add(self, username, token, url):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.jira_cloud(username, token, user_id,url)
                            VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
                            RETURNING username, token, url;""",
                            {"user_id": self._user_id, "username": username,
                             "token": token, "url": url})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            return self.get()

    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.jira_cloud
                            WHERE user_id=%(user_id)s;""",
                            {"user_id": self._user_id})
            )
            return {"state": "success"}

    def add_edit(self, data: schemas.IssueTrackingJiraSchema):
        if self.integration is not None:
            return self.update(
                changes={
                    "username": data.username,
                    "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
                        else self.integration.token,
                    "url": data.url
                },
                obfuscate=True
            )
        else:
            return self.add(
                username=data.username,
                token=data.token,
                url=str(data.url)
            )
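`obfuscate_string` above keeps only the last four characters. A worked example, including the short-token edge case where `len(string) - 4` is zero or negative and nothing gets masked:

def obfuscate_string(string):
    return "*" * (len(string) - 4) + string[-4:]

assert obfuscate_string("abcdefgh") == "****efgh"
assert obfuscate_string("abcd") == "abcd"  # 4 chars: the mask is empty
assert obfuscate_string("abc") == "abc"    # "*" * -1 == "", string[-4:] is the whole string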
56 api/chalicelib/core/integration_jira_cloud_issue.py (new file)
@@ -0,0 +1,56 @@
from chalicelib.utils import jira_client
from chalicelib.core.integration_base_issue import BaseIntegrationIssue


class JIRACloudIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, token, username, url):
        self.username = username
        self.url = url
        self._client = jira_client.JiraManager(self.url, self.username, token, None)
        super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

    def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
        self._client.set_jira_project_id(integration_project_id)
        data = {
            'summary': title,
            'description': description,
            'issuetype': {'id': issue_type},
            'assignee': {"id": assignee},
            "labels": ["OpenReplay"]
        }
        return self._client.create_issue(data)

    def get_by_ids(self, saved_issues):
        projects_map = {}
        for i in saved_issues:
            if i["integrationProjectId"] not in projects_map.keys():
                projects_map[i["integrationProjectId"]] = []
            projects_map[i["integrationProjectId"]].append(i["id"])

        results = []
        for integration_project_id in projects_map:
            self._client.set_jira_project_id(integration_project_id)
            jql = 'labels = OpenReplay'
            if len(projects_map[integration_project_id]) > 0:
                jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
            issues = self._client.get_issues(jql, offset=0)
            results += issues
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        self._client.set_jira_project_id(integration_project_id)
        return self._client.get_issue_v3(assignment_id)

    def comment(self, integration_project_id, assignment_id, comment):
        self._client.set_jira_project_id(integration_project_id)
        return self._client.add_comment_v3(assignment_id, comment)

    def get_metas(self, integration_project_id):
        meta = {}
        self._client.set_jira_project_id(integration_project_id)
        meta['issueTypes'] = self._client.get_issue_types()
        meta['users'] = self._client.get_assignable_users()
        return {"provider": self.provider.lower(), **meta}

    def get_projects(self):
        return self._client.get_projects()
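Worked example of the JQL that `get_by_ids` assembles above, grouping saved issue ids by JIRA project before querying (values are made up):

projects_map = {"10001": ["OR-12", "OR-31"], "10002": []}
for project, ids in projects_map.items():
    jql = 'labels = OpenReplay'
    if len(ids) > 0:
        jql += f" AND ID IN ({','.join(ids)})"
    print(project, "->", jql)
# 10001 -> labels = OpenReplay AND ID IN (OR-12,OR-31)
# 10002 -> labels = OpenReplay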
64 api/chalicelib/core/integrations_global.py (new file)
@@ -0,0 +1,64 @@
import schemas
from chalicelib.utils import pg_client


def get_global_integrations_status(tenant_id, user_id, project_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                SELECT EXISTS((SELECT 1
                               FROM public.oauth_authentication
                               WHERE user_id = %(user_id)s
                                 AND provider = 'github')) AS {schemas.IntegrationType.github.value},
                       EXISTS((SELECT 1
                               FROM public.jira_cloud
                               WHERE user_id = %(user_id)s)) AS {schemas.IntegrationType.jira.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='bugsnag')) AS {schemas.IntegrationType.bugsnag.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='cloudwatch')) AS {schemas.IntegrationType.cloudwatch.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='datadog')) AS {schemas.IntegrationType.datadog.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='newrelic')) AS {schemas.IntegrationType.newrelic.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='rollbar')) AS {schemas.IntegrationType.rollbar.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='sentry')) AS {schemas.IntegrationType.sentry.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='stackdriver')) AS {schemas.IntegrationType.stackdriver.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='sumologic')) AS {schemas.IntegrationType.sumologic.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='elasticsearch')) AS {schemas.IntegrationType.elasticsearch.value},
                       EXISTS((SELECT 1
                               FROM public.webhooks
                               WHERE type='slack' AND deleted_at ISNULL)) AS {schemas.IntegrationType.slack.value},
                       EXISTS((SELECT 1
                               FROM public.webhooks
                               WHERE type='msteams' AND deleted_at ISNULL)) AS {schemas.IntegrationType.ms_teams.value};""",
                {"user_id": user_id, "tenant_id": tenant_id, "project_id": project_id})
        )
        current_integrations = cur.fetchone()
        result = []
        for k in current_integrations.keys():
            result.append({"name": k, "integrated": current_integrations[k]})
        return result
45 api/chalicelib/core/integrations_manager.py (new file)
@@ -0,0 +1,45 @@
from chalicelib.core import integration_github, integration_jira_cloud
from chalicelib.utils import pg_client

SUPPORTED_TOOLS = [integration_github.PROVIDER, integration_jira_cloud.PROVIDER]


def get_available_integrations(user_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                SELECT EXISTS((SELECT 1
                               FROM public.oauth_authentication
                               WHERE user_id = %(user_id)s
                                 AND provider = 'github')) AS github,
                       EXISTS((SELECT 1
                               FROM public.jira_cloud
                               WHERE user_id = %(user_id)s)) AS jira;""",
                {"user_id": user_id})
        )
        current_integrations = cur.fetchone()
        return dict(current_integrations)


def __get_default_integration(user_id):
    current_integrations = get_available_integrations(user_id)
    return integration_github.PROVIDER if current_integrations["github"] else integration_jira_cloud.PROVIDER if \
        current_integrations["jira"] else None


def get_integration(tenant_id, user_id, tool=None, for_delete=False):
    if tool is None:
        tool = __get_default_integration(user_id=user_id)
        if tool is None:
            return {"errors": [f"no issue tracking tool found"]}, None
    tool = tool.upper()
    if tool not in SUPPORTED_TOOLS:
        return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
    if tool == integration_jira_cloud.PROVIDER:
        integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
        if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
            return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
        return None, integration
    elif tool == integration_github.PROVIDER:
        return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
    return {"errors": ["lost integration"]}, None
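Callers get an `(error, integration)` pair out of `get_integration` above; a hedged usage sketch (this assumes the `IntegrationType` enum values compare equal to the upper-cased tool string, which this diff does not show):

from chalicelib.core import integrations_manager

error, integration = integrations_manager.get_integration(tenant_id=1, user_id=7, tool="github")
if error is not None:
    print(error)  # e.g. {"errors": ["no issue tracking tool found"]}
else:
    projects = integration.issue_handler.get_projects()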
@@ -1,86 +0,0 @@
import schemas
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.GITHUB


class GitHubIntegration(base.BaseIntegration):

    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        super(GitHubIntegration, self).__init__(user_id=user_id, ISSUE_CLASS=GithubIntegrationIssue)

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        return self._issue_handler

    def get_obfuscated(self):
        integration = self.get()
        if integration is None:
            return None
        return {"token": helper.obfuscate(text=integration["token"]), "provider": self.provider.lower()}

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.oauth_authentication
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    def _add(self, data):
        pass

    def add(self, token, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.oauth_authentication(user_id, provider, provider_user_id, token)
                            VALUES(%(user_id)s, 'github', '', %(token)s)
                            RETURNING token;""",
                            {"user_id": self._user_id,
                             "token": token})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            if w and w.get("token") and obfuscate:
                w["token"] = helper.obfuscate(w["token"])
            return w

    # TODO: make a revoke token call
    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.oauth_authentication
                            WHERE user_id=%(user_id)s AND provider=%(provider)s;""",
                            {"user_id": self._user_id, "provider": self.provider.lower()})
            )
            return {"state": "success"}

    def add_edit(self, data: schemas.IssueTrackingGithubSchema):
        s = self.get()
        if s is not None:
            return self.update(
                changes={
                    "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
                        else s.token
                },
                obfuscate=True
            )
        else:
            return self.add(token=data.token, obfuscate=True)
@@ -1,100 +0,0 @@
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
from chalicelib.utils import github_client_v3
from chalicelib.utils.github_client_v3 import github_formatters as formatter


class GithubIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, token):
        self.__client = github_client_v3.githubV3Request(token)
        super(GithubIntegrationIssue, self).__init__("GITHUB", token)

    def get_current_user(self):
        return formatter.user(self.__client.get("/user"))

    def get_meta(self, repoId):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{repoId}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)
        meta = {
            'users': users,
            'issueTypes': [formatter.label(l) for l in
                           self.__client.get(f"/repositories/{repoId}/labels")]
        }

        return meta

    def create_new_assignment(self, project_id, title, description, assignee,
                              issue_type):
        repoId = project_id
        assignees = [assignee]
        labels = [str(issue_type)]

        metas = self.get_meta(repoId)
        real_assignees = []
        for a in assignees:
            for u in metas["users"]:
                if a == str(u["id"]):
                    real_assignees.append(u["name"])
                    break
        real_labels = ["OpenReplay"]
        for l in labels:
            found = False
            for ll in metas["issueTypes"]:
                if l == str(ll["id"]):
                    real_labels.append(ll["name"])
                    found = True
                    break
            if not found:
                real_labels.append(l)
        issue = self.__client.post(f"/repositories/{repoId}/issues", body={"title": title, "body": description,
                                                                           "assignees": real_assignees,
                                                                           "labels": real_labels})
        return formatter.issue(issue)

    def get_by_ids(self, saved_issues):
        results = []
        for i in saved_issues:
            results.append(self.get(project_id=i["integrationProjectId"], assignment_id=i["id"]))
        return {"issues": results}

    def get(self, project_id, assignment_id):
        repoId = project_id
        issueNumber = assignment_id
        issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
        issue = formatter.issue(issue)
        if issue["commentsCount"] > 0:
            issue["comments"] = [formatter.comment(c) for c in
                                 self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
        return issue

    def comment(self, project_id, assignment_id, comment):
        repoId = project_id
        issueNumber = assignment_id
        commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                            body={"body": comment})
        return formatter.comment(commentCreated)

    def get_metas(self, project_id):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{project_id}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]
        if current_user not in users:
            users.insert(0, current_user)

        return {"provider": self.provider.lower(),
                'users': users,
                'issueTypes': [formatter.label(l) for l in
                               self.__client.get(f"/repositories/{project_id}/labels")]
                }

    def get_projects(self):
        repos = self.__client.get("/user/repos")
        return [formatter.repo(r) for r in repos]
@@ -1,68 +0,0 @@
import schemas
from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client


def get_global_integrations_status(tenant_id, user_id, project_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                SELECT EXISTS((SELECT 1
                               FROM public.oauth_authentication
                               WHERE user_id = %(user_id)s
                                 AND provider = 'github')) AS {schemas.IntegrationType.GITHUB.value},
                       EXISTS((SELECT 1
                               FROM public.jira_cloud
                               WHERE user_id = %(user_id)s)) AS {schemas.IntegrationType.JIRA.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='bugsnag')) AS {schemas.IntegrationType.BUGSNAG.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='cloudwatch')) AS {schemas.IntegrationType.CLOUDWATCH.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='datadog')) AS {schemas.IntegrationType.DATADOG.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='newrelic')) AS {schemas.IntegrationType.NEWRELIC.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='rollbar')) AS {schemas.IntegrationType.ROLLBAR.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='sentry')) AS {schemas.IntegrationType.SENTRY.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='stackdriver')) AS {schemas.IntegrationType.STACKDRIVER.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='sumologic')) AS {schemas.IntegrationType.SUMOLOGIC.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s
                                 AND provider='elasticsearch')) AS {schemas.IntegrationType.ELASTICSEARCH.value},
                       EXISTS((SELECT 1
                               FROM public.webhooks
                               WHERE type='slack' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.SLACK.value},
                       EXISTS((SELECT 1
                               FROM public.webhooks
                               WHERE type='msteams' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.MS_TEAMS.value},
                       EXISTS((SELECT 1
                               FROM public.integrations
                               WHERE project_id=%(project_id)s AND provider='dynatrace')) AS {schemas.IntegrationType.DYNATRACE.value};""",
                {"user_id": user_id, "tenant_id": tenant_id, "project_id": project_id})
        )
        current_integrations = cur.fetchone()
        result = []
        for k in current_integrations.keys():
            result.append({"name": k, "integrated": current_integrations[k]})
        return result
@@ -1,45 +0,0 @@
from chalicelib.core.issue_tracking import github, jira_cloud
from chalicelib.utils import pg_client

SUPPORTED_TOOLS = [github.PROVIDER, jira_cloud.PROVIDER]


def get_available_integrations(user_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                SELECT EXISTS((SELECT 1
                               FROM public.oauth_authentication
                               WHERE user_id = %(user_id)s
                                 AND provider = 'github')) AS github,
                       EXISTS((SELECT 1
                               FROM public.jira_cloud
                               WHERE user_id = %(user_id)s)) AS jira;""",
                {"user_id": user_id})
        )
        current_integrations = cur.fetchone()
        return dict(current_integrations)


def __get_default_integration(user_id):
    current_integrations = get_available_integrations(user_id)
    return github.PROVIDER if current_integrations["github"] else jira_cloud.PROVIDER if \
        current_integrations["jira"] else None


def get_integration(tenant_id, user_id, tool=None, for_delete=False):
    if tool is None:
        tool = __get_default_integration(user_id=user_id)
        if tool is None:
            return {"errors": [f"no issue tracking tool found"]}, None
    tool = tool.upper()
    if tool not in SUPPORTED_TOOLS:
        return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
    if tool == jira_cloud.PROVIDER:
        integration = jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
        if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
            return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
        return None, integration
    elif tool == github.PROVIDER:
        return None, github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
    return {"errors": ["lost integration"]}, None
@@ -1,139 +0,0 @@
import schemas
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.JIRA


def obfuscate_string(string):
    return "*" * (len(string) - 4) + string[-4:]


class JIRAIntegration(base.BaseIntegration):
    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        # TODO: enable super-constructor when OAuth is done
        # super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
        self._issue_handler = None
        self._user_id = user_id
        self.integration = self.get()

    @staticmethod
    def __validate(data):
        data["valid"] = JIRAIntegration.__is_valid_url(data["url"])

    @staticmethod
    def __is_valid_url(url):
        return url.endswith('atlassian.net') or url.endswith('atlassian.net/')

    @property
    def provider(self):
        return PROVIDER

    @property
    def issue_handler(self):
        if JIRAIntegration.__is_valid_url(self.integration["url"]) and self._issue_handler is None:
            try:
                self._issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
                                                                username=self.integration["username"],
                                                                url=self.integration["url"])
            except Exception as e:
                self._issue_handler = None
                self.integration["valid"] = False
                return {"errors": ["Something went wrong, please check your JIRA credentials."]}
        return self._issue_handler

    # TODO: remove this once jira-oauth is done
    def get(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify(
                    """SELECT username, token, url
                       FROM public.jira_cloud
                       WHERE user_id = %(user_id)s;""",
                    {"user_id": self._user_id})
            )
            data = helper.dict_to_camel_case(cur.fetchone())

        if data is None:
            return
        JIRAIntegration.__validate(data)
        return data

    def get_obfuscated(self):
        if self.integration is None:
            return None
        integration = dict(self.integration)
        integration["token"] = obfuscate_string(self.integration["token"])
        integration["provider"] = self.provider.lower()
        return integration

    def update(self, changes, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
            cur.execute(
                cur.mogrify(f"""\
                            UPDATE public.jira_cloud
                            SET {','.join(sub_query)}
                            WHERE user_id=%(user_id)s
                            RETURNING username, token, url;""",
                            {"user_id": self._user_id,
                             **changes})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            JIRAIntegration.__validate(w)
            if obfuscate:
                w["token"] = obfuscate_string(w["token"])
            return w

    # TODO: make this generic for all issue tracking integrations
    def _add(self, data):
        print("a pretty defined abstract method")
        return

    def add(self, username, token, url, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify(""" \
                            INSERT INTO public.jira_cloud(username, token, user_id, url)
                            VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
                            {"user_id": self._user_id, "username": username,
                             "token": token, "url": url})
            )
            w = helper.dict_to_camel_case(cur.fetchone())
            JIRAIntegration.__validate(w)
            if obfuscate:
                w["token"] = obfuscate_string(w["token"])

            return w

    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify(""" \
                            DELETE
                            FROM public.jira_cloud
                            WHERE user_id = %(user_id)s;""",
                            {"user_id": self._user_id})
            )
            return {"state": "success"}

    def add_edit(self, data: schemas.IssueTrackingJiraSchema):
        if self.integration is not None:
            return self.update(
                changes={
                    "username": data.username,
                    "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
                        else self.integration["token"],
                    "url": str(data.url)
                },
                obfuscate=True
            )
        else:
            return self.add(
                username=data.username,
                token=data.token,
                url=str(data.url),
                obfuscate=True
            )
@@ -1,56 +0,0 @@
from chalicelib.utils import jira_client
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue


class JIRACloudIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, token, username, url):
        self.username = username
        self.url = url
        self._client = jira_client.JiraManager(self.url, self.username, token, None)
        super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

    def create_new_assignment(self, project_id, title, description, assignee, issue_type):
        self._client.set_jira_project_id(project_id)
        data = {
            'summary': title,
            'description': description,
            'issuetype': {'id': issue_type},
            'assignee': {"id": assignee},
            "labels": ["OpenReplay"]
        }
        return self._client.create_issue(data)

    def get_by_ids(self, saved_issues):
        projects_map = {}
        for i in saved_issues:
            if i["integrationProjectId"] not in projects_map.keys():
                projects_map[i["integrationProjectId"]] = []
            projects_map[i["integrationProjectId"]].append(i["id"])

        results = []
        for project_id in projects_map:
            self._client.set_jira_project_id(project_id)
            jql = 'labels = OpenReplay'
            if len(projects_map[project_id]) > 0:
                jql += f" AND ID IN ({','.join(projects_map[project_id])})"
            issues = self._client.get_issues(jql, offset=0)
            results += issues
        return {"issues": results}

    def get(self, project_id, assignment_id):
        self._client.set_jira_project_id(project_id)
        return self._client.get_issue_v3(assignment_id)

    def comment(self, project_id, assignment_id, comment):
        self._client.set_jira_project_id(project_id)
        return self._client.add_comment_v3(assignment_id, comment)

    def get_metas(self, project_id):
        meta = {}
        self._client.set_jira_project_id(project_id)
        meta['issueTypes'] = self._client.get_issue_types()
        meta['users'] = self._client.get_assignable_users()
        return {"provider": self.provider.lower(), **meta}

    def get_projects(self):
        return self._client.get_projects()
@@ -1,5 +1,32 @@
from chalicelib.utils import pg_client, helper

ISSUE_TYPES = ['click_rage', 'dead_click', 'excessive_scrolling', 'bad_request', 'missing_resource', 'memory', 'cpu',
               'slow_resource', 'slow_page_load', 'crash', 'ml_cpu', 'ml_memory', 'ml_dead_click', 'ml_click_rage',
               'ml_mouse_thrashing', 'ml_excessive_scrolling', 'ml_slow_resources', 'custom', 'js_exception',
               'custom_event_error', 'js_error']
ORDER_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 0
      WHEN type = 'bad_request' THEN 1
      WHEN type = 'missing_resource' THEN 2
      WHEN type = 'click_rage' THEN 3
      WHEN type = 'dead_click' THEN 4
      WHEN type = 'memory' THEN 5
      WHEN type = 'cpu' THEN 6
      WHEN type = 'crash' THEN 7
      ELSE -1 END)::INTEGER
"""
NAME_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 'Errors'
      WHEN type = 'bad_request' THEN 'Bad Requests'
      WHEN type = 'missing_resource' THEN 'Missing Images'
      WHEN type = 'click_rage' THEN 'Click Rage'
      WHEN type = 'dead_click' THEN 'Dead Clicks'
      WHEN type = 'memory' THEN 'High Memory'
      WHEN type = 'cpu' THEN 'High CPU'
      WHEN type = 'crash' THEN 'Crashes'
      ELSE type::text END)::text
"""


def get(project_id, issue_id):
    with pg_client.PostgresClient() as cur:
@@ -35,6 +62,20 @@ def get_by_session_id(session_id, project_id, issue_type=None):
        return helper.list_to_camel_case(cur.fetchall())


def get_types_by_project(project_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""SELECT type,
                                   {ORDER_QUERY}>=0 AS visible,
                                   {ORDER_QUERY} AS order,
                                   {NAME_QUERY} AS name
                            FROM (SELECT DISTINCT type
                                  FROM public.issues
                                  WHERE project_id = %(project_id)s) AS types
                            ORDER BY "order";""", {"project_id": project_id}))
        return helper.list_to_camel_case(cur.fetchall())


def get_all_types():
    return [
        {
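Given the two CASE expressions above, `get_types_by_project` returns one row per distinct issue type, ordered by the numeric rank, with unmapped types sorted first as order -1 and marked not visible. Illustrative output shape (camelCased by `helper.list_to_camel_case`; values made up):

# Inferred shape of get_types_by_project(project_id) for a project with three types:
[
    {"type": "ml_dead_click", "visible": False, "order": -1, "name": "ml_dead_click"},
    {"type": "js_exception", "visible": True, "order": 0, "name": "Errors"},
    {"type": "click_rage", "visible": True, "order": 3, "name": "Click Rage"},
]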
@@ -1,10 +1,6 @@
import logging

from chalicelib.core.sessions import sessions_mobs, sessions_devtool
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)
from chalicelib.core import sessions_mobs, sessions_devtool


class Actions:
@@ -154,23 +150,23 @@ def get_scheduled_jobs():
def execute_jobs():
    jobs = get_scheduled_jobs()
    for job in jobs:
        logger.info(f"Executing jobId:{job['jobId']}")
        print(f"Executing jobId:{job['jobId']}")
        try:
            if job["action"] == Actions.DELETE_USER_DATA:
                session_ids = __get_session_ids_by_user_ids(project_id=job["projectId"],
                                                            user_ids=[job["referenceId"]])
                if len(session_ids) > 0:
                    logger.info(f"Deleting {len(session_ids)} sessions")
                    print(f"Deleting {len(session_ids)} sessions")
                    __delete_sessions_by_session_ids(session_ids=session_ids)
                    __delete_session_mobs_by_session_ids(session_ids=session_ids, project_id=job["projectId"])
            else:
                raise Exception(f"The action '{job['action']}' not supported.")

            job["status"] = JobStatus.COMPLETED
            logger.info(f"Job completed {job['jobId']}")
            print(f"Job completed {job['jobId']}")
        except Exception as e:
            job["status"] = JobStatus.FAILED
            job["errors"] = str(e)
            logger.error(f"Job failed {job['jobId']}")
            print(f"Job failed {job['jobId']}")

        update(job["jobId"], job)
75 api/chalicelib/core/log_tool_bugsnag.py (new file)
@@ -0,0 +1,75 @@
from chalicelib.core import log_tools
import requests

from schemas import schemas

IN_TY = "bugsnag"


def list_projects(auth_token):
    r = requests.get(url="https://api.bugsnag.com/user/organizations",
                     params={"per_page": "100"},
                     headers={"Authorization": "token " + auth_token, "X-Version": "2"})
    if r.status_code != 200:
        print("=======> bugsnag get organizations: something went wrong")
        print(r)
        print(r.status_code)
        print(r.text)
        return []

    orgs = []
    for i in r.json():

        pr = requests.get(url="https://api.bugsnag.com/organizations/%s/projects" % i["id"],
                          params={"per_page": "100"},
                          headers={"Authorization": "token " + auth_token, "X-Version": "2"})
        if pr.status_code != 200:
            print("=======> bugsnag get projects: something went wrong")
            print(pr)
            print(r.status_code)
            print(r.text)
            continue
        orgs.append({"name": i["name"], "projects": [{"name": p["name"], "id": p["id"]} for p in pr.json()]})
    return orgs


def get_all(tenant_id):
    return log_tools.get_all_by_tenant(tenant_id=tenant_id, integration=IN_TY)


def get(project_id):
    return log_tools.get(project_id=project_id, integration=IN_TY)


def update(tenant_id, project_id, changes):
    options = {}
    if "authorizationToken" in changes:
        options["authorizationToken"] = changes.pop("authorizationToken")
    if "bugsnagProjectId" in changes:
        options["bugsnagProjectId"] = changes.pop("bugsnagProjectId")
    return log_tools.edit(project_id=project_id, integration=IN_TY, changes=options)


def add(tenant_id, project_id, authorization_token, bugsnag_project_id):
    options = {
        "bugsnagProjectId": bugsnag_project_id,
        "authorizationToken": authorization_token,
    }
    return log_tools.add(project_id=project_id, integration=IN_TY, options=options)


def delete(tenant_id, project_id):
    return log_tools.delete(project_id=project_id, integration=IN_TY)


def add_edit(tenant_id, project_id, data: schemas.IntegrationBugsnagSchema):
    s = get(project_id)
    if s is not None:
        return update(tenant_id=tenant_id, project_id=project_id,
                      changes={"authorizationToken": data.authorization_token,
                               "bugsnagProjectId": data.bugsnag_project_id})
    else:
        return add(tenant_id=tenant_id,
                   project_id=project_id,
                   authorization_token=data.authorization_token,
                   bugsnag_project_id=data.bugsnag_project_id)
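One thing to flag in `list_projects` above: the failing-projects branch prints `r.status_code` and `r.text` (the organizations response) where `pr.*` was presumably intended. A small helper showing the intended pattern (illustrative only):

def log_bad_response(resp, what):
    # Log the response that actually failed, not the previous one.
    if resp.status_code != 200:
        print(f"=======> bugsnag get {what}: something went wrong")
        print(resp.status_code)
        print(resp.text)
        return False
    return True

# inside the loop above: if not log_bad_response(pr, "projects"): continue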
120 api/chalicelib/core/log_tool_cloudwatch.py (new file)
@@ -0,0 +1,120 @@
import boto3
from chalicelib.core import log_tools
from schemas import schemas

IN_TY = "cloudwatch"


def __find_groups(client, token):
    d_args = {
        "limit": 50
    }
    if token is not None:
        d_args["nextToken"] = token
    response = client.describe_log_groups(**d_args)
    response["logGroups"] = [i['logGroupName'] for i in response["logGroups"]]
    if "nextToken" not in response:
        return response["logGroups"]

    return response["logGroups"] + __find_groups(client, response["nextToken"])


def __make_stream_filter(start_time, end_time):
    def __valid_stream(stream):
        return "firstEventTimestamp" in stream and not (
                stream['firstEventTimestamp'] <= start_time and stream["lastEventTimestamp"] <= start_time
                or stream['firstEventTimestamp'] >= end_time and stream["lastEventTimestamp"] >= end_time
        )

    return __valid_stream


def __find_streams(project_id, log_group, client, token, stream_filter):
    d_args = {"logGroupName": log_group, "orderBy": 'LastEventTime', 'limit': 50}
    if token is not None and len(token) > 0:
        d_args["nextToken"] = token
    data = client.describe_log_streams(**d_args)
    streams = list(filter(stream_filter, data['logStreams']))
    if 'nextToken' not in data:
        save_new_token(project_id=project_id, token=token)
        return streams
    return streams + __find_streams(project_id, log_group, client, data['nextToken'], stream_filter)


def __find_events(client, log_group, streams, last_token, start_time, end_time):
    f_args = {
        "logGroupName": log_group,
        "logStreamNames": streams,
        "startTime": start_time,
        "endTime": end_time,
        "limit": 10000,
        "filterPattern": "openreplay_session_id"
    }
    if last_token is not None:
        f_args["nextToken"] = last_token
    response = client.filter_log_events(
        **f_args
    )
    if "nextToken" not in response:
        return response["events"]

    return response["events"] + __find_events(client, log_group, streams, response["nextToken"], start_time, end_time)


def list_log_groups(aws_access_key_id, aws_secret_access_key, region):
    logs = boto3.client('logs', aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        region_name=region
                        )
    return __find_groups(logs, None)


def get_all(tenant_id):
    return log_tools.get_all_by_tenant(tenant_id=tenant_id, integration=IN_TY)


def get(project_id):
    return log_tools.get(project_id=project_id, integration=IN_TY)


def update(tenant_id, project_id, changes):
    options = {}
    if "authorization_token" in changes:
        options["authorization_token"] = changes.pop("authorization_token")
    if "project_id" in changes:
        options["project_id"] = changes.pop("project_id")
    if len(options.keys()) > 0:
        changes["options"] = options
    return log_tools.edit(project_id=project_id, integration=IN_TY, changes=changes)


def add(tenant_id, project_id, aws_access_key_id, aws_secret_access_key, log_group_name, region):
    return log_tools.add(project_id=project_id, integration=IN_TY,
                         options={"awsAccessKeyId": aws_access_key_id,
                                  "awsSecretAccessKey": aws_secret_access_key,
                                  "logGroupName": log_group_name, "region": region})


def save_new_token(project_id, token):
    update(tenant_id=None, project_id=project_id, changes={"last_token": token})


def delete(tenant_id, project_id):
    return log_tools.delete(project_id=project_id, integration=IN_TY)


def add_edit(tenant_id, project_id, data: schemas.IntegrationCloudwatchSchema):
    s = get(project_id)
    if s is not None:
        return update(tenant_id=tenant_id, project_id=project_id,
                      changes={"awsAccessKeyId": data.aws_access_key_id,
                               "awsSecretAccessKey": data.aws_secret_access_key,
                               "logGroupName": data.log_group_name,
                               "region": data.region})
    else:
        return add(tenant_id=tenant_id,
                   project_id=project_id,
                   aws_access_key_id=data.aws_access_key_id,
                   aws_secret_access_key=data.aws_secret_access_key,
                   log_group_name=data.log_group_name,
                   region=data.region)
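The pagination helpers above (`__find_groups`, `__find_streams`, `__find_events`) recurse once per `nextToken` page; on accounts with very many pages that can approach Python's default recursion limit. An iterative equivalent for the group listing, using the same boto3 `describe_log_groups` contract (sketch, not part of the diff):

def find_groups_iterative(client):
    groups, token = [], None
    while True:
        d_args = {"limit": 50}
        if token is not None:
            d_args["nextToken"] = token
        response = client.describe_log_groups(**d_args)
        groups += [g["logGroupName"] for g in response["logGroups"]]
        token = response.get("nextToken")
        if token is None:
            return groups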
Some files were not shown because too many files have changed in this diff.