Merge branch 'dev' into player-ref-ph3
commit 5a966ca3de
176 changed files with 4387 additions and 1915 deletions
.github/workflows/alerts-ee.yaml (vendored, 215 lines changed)

@@ -10,8 +10,21 @@ on:
     branches:
       - api-v1.10.0
     paths:
-      - ee/api/**
-      - api/**
+      - "ee/api/**"
+      - "api/**"
+      - "!api/.gitignore"
+      - "!api/routers"
+      - "!api/app.py"
+      - "!api/*-dev.sh"
+      - "!api/requirements.txt"
+      - "!api/requirements-crons.txt"
+      - "!ee/api/.gitignore"
+      - "!ee/api/routers"
+      - "!ee/api/app.py"
+      - "!ee/api/*-dev.sh"
+      - "!ee/api/requirements.txt"
+      - "!ee/api/requirements-crons.txt"

 name: Build and Deploy Alerts EE
@@ -21,115 +34,115 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing api image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd api
          PUSH_IMAGE=0 bash -x ./build_alerts.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("alerts")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("alerts")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done

      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/alerts/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: ee
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: 'Build failed :bomb:'

      # - name: Debug Job
      # #  if: ${{ failure() }}
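Aside on the build step above: the `[[ ... ]] || { scan } && { echo ... }` construct is easy to misread. A minimal standalone sketch of the same gate (the variable values here are illustrative; DOCKER_REPO and IMAGE_TAG come from the step's env):

# When skip_security_checks is "true", the scan group to the right of || is
# skipped entirely. When it is anything else, the scan group runs; if that
# group finishes with status 0, the && group runs as well -- so the
# "Skipping Security Checks" message also prints after a clean scan,
# not only when the scan was actually skipped.
skip_security_checks="false"
[[ "x$skip_security_checks" == "xtrue" ]] || {
  images=("alerts")
  for image in ${images[*]}; do
    # trivy exits non-zero when HIGH/CRITICAL vulnerabilities are found
    ./trivy image --exit-code 1 --severity "HIGH,CRITICAL" "$DOCKER_REPO/$image:$IMAGE_TAG"
  done
  err_code=$?   # captures the exit status of the last trivy run only
  [[ $err_code -ne 0 ]] && {
    exit $err_code
  }
} && {
  echo "Skipping Security Checks"
}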
.github/workflows/alerts.yaml (vendored, 200 lines changed)

@@ -10,7 +10,13 @@ on:
     branches:
       - api-v1.10.0
     paths:
-      - api/**
+      - "api/**"
+      - "!api/.gitignore"
+      - "!api/routers"
+      - "!api/app.py"
+      - "!api/*-dev.sh"
+      - "!api/requirements.txt"
+      - "!api/requirements-crons.txt"

 name: Build and Deploy Alerts
@@ -20,112 +26,112 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing Alerts image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd api
          PUSH_IMAGE=0 bash -x ./build_alerts.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("alerts")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("alerts")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done

      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              tag: ${image_array[1]}
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/alerts/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: foss
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: 'Build failed :bomb:'

      # - name: Debug Job
      #   if: ${{ failure() }}
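The "Creating old image input" step in both alerts workflows builds a helm values override from whatever is currently running in the cluster. A small standalone sketch of the same idea (namespace and paths are the workflow's own; the sample pod image in the comment is made up):

# List the images of all running pods one per line, keep only the entries
# from the /foss/ registry path, and reduce each to "name:tag". The count
# prefix added by `uniq -c` lands in the first cut field and is discarded.
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml
for line in $(cat /tmp/image_tag.txt); do
  # e.g. line="alerts:v1.9.0" splits into (alerts v1.9.0)
  image_array=($(echo "$line" | tr ':' '\n'))
  cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    tag: ${image_array[1]}
EOF
done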
.github/workflows/api-ee.yaml (vendored, 14 lines changed)

@@ -10,8 +10,18 @@ on:
     branches:
       - api-v1.10.0
     paths:
-      - ee/api/**
-      - api/**
+      - "ee/api/**"
+      - "api/**"
+      - "!api/.gitignore"
+      - "!api/app_alerts.py"
+      - "!api/*-dev.sh"
+      - "!api/requirements-*.txt"
+      - "!ee/api/.gitignore"
+      - "!ee/api/app_alerts.py"
+      - "!ee/api/app_crons.py"
+      - "!ee/api/*-dev.sh"
+      - "!ee/api/requirements-*.txt"

 name: Build and Deploy Chalice EE
.github/workflows/api.yaml (vendored, 8 lines changed)

@@ -8,9 +8,13 @@ on:
       default: 'false'
   push:
     branches:
-      - dev
+      - api-v1.10.0
     paths:
-      - api/**
+      - "api/**"
+      - "!api/.gitignore"
+      - "!api/app_alerts.py"
+      - "!api/*-dev.sh"
+      - "!api/requirements-*.txt"

 name: Build and Deploy Chalice
.github/workflows/assist-ee.yaml (vendored, new file, 120 lines)

@@ -0,0 +1,120 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
    paths:
      - "ee/utilities/**"
      - "utilities/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist EE

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing Assist image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd utilities
          PUSH_IMAGE=0 bash -x ./build.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done

      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/assist/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #
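The deploy steps in these workflows all use the same trick before rendering: prune the umbrella chart down to only the charts being deployed. A sketch of the pattern (chart names as in the workflows above; the exact helm flags vary slightly between the OSS and EE variants):

# Stash the charts we want helm to render, wipe the rest, and move them
# back: helm template then emits manifests for those services alone.
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/

# Render with the tag overrides and apply. --no-hooks plus skipMigration
# keeps this partial deploy from re-running cluster-wide migration jobs.
helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true \
  --no-hooks | kubectl apply -n app -f -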
.github/workflows/assist.yaml (vendored, new file, 119 lines)

@@ -0,0 +1,119 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
    paths:
      - "utilities/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing Assist image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd utilities
          PUSH_IMAGE=0 bash -x ./build.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done

      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secrets
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/assist/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #
.github/workflows/crons-ee.yaml (vendored, 20 lines changed)

@@ -10,8 +10,20 @@ on:
     branches:
       - api-v1.10.0
     paths:
-      - ee/api/**
-      - api/**
+      - "ee/api/**"
+      - "api/**"
+      - "!api/.gitignore"
+      - "!api/app.py"
+      - "!api/app_alerts.py"
+      - "!api/*-dev.sh"
+      - "!api/requirements.txt"
+      - "!api/requirements-alerts.txt"
+      - "!ee/api/.gitignore"
+      - "!ee/api/app.py"
+      - "!ee/api/app_alerts.py"
+      - "!ee/api/*-dev.sh"
+      - "!ee/api/requirements.txt"
+      - "!ee/api/requirements-crons.txt"

 name: Build and Deploy Crons EE

@@ -110,9 +122,9 @@ jobs:

          cat /tmp/image_override.yaml
          # Deploy command
-          mv openreplay/charts/{ingress-nginx,crons,quickwit} /tmp
+          mv openreplay/charts/{ingress-nginx,utilities,quickwit} /tmp
          rm -rf openreplay/charts/*
-          mv /tmp/{ingress-nginx,crons,quickwit} openreplay/charts/
+          mv /tmp/{ingress-nginx,utilities,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
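The "Update changed image tag" sed in these workflows relies on line offsets inside the generated override file. A toy reproduction (the sample file contents are made up):

# "/alerts/{n;n;n;s/.../}" finds the line matching "alerts", advances three
# lines (n;n;n), then rewrites that line. In the EE override the "tag:" field
# sits three lines below the chart name because of the extra comment line;
# the OSS variant has no comment, hence its sed uses only "n;n".
cat > /tmp/image_override.yaml <<'EOF'
alerts:
  image:
    # We've to strip off the -ee, as helm will append it.
    tag: v1.9.0
EOF

IMAGE_TAG="dev_abc123"
sed -i "/alerts/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml   # the tag line now reads: tag: dev_abc123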
.github/workflows/peers-ee.yaml (vendored, new file, 69 lines)

@@ -0,0 +1,69 @@
# This action will push the peers changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
    paths:
      - "ee/peers/**"
      - "peers/**"
      - "!peers/.gitignore"
      - "!peers/*-dev.sh"

name: Build and Deploy Peers

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing api image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          cd peers
          PUSH_IMAGE=1 bash build.sh ee

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
          sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
          sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml
          sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
          sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
          bash kube-install.sh --app peers
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}
      #     ENVIRONMENT: staging
      #
@@ -1,13 +1,15 @@
-# This action will push the utilities changes to aws
+# This action will push the peers changes to aws
 on:
   workflow_dispatch:
   push:
     branches:
       - dev
     paths:
-      - utilities/**
+      - "peers/**"
+      - "!peers/.gitignore"
+      - "!peers/*-dev.sh"

-name: Build and Deploy Utilities
+name: Build and Deploy Peers

 jobs:
   deploy:

@@ -39,7 +41,7 @@ jobs:
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging
         run: |
-          cd utilities
+          cd peers
           PUSH_IMAGE=1 bash build.sh
       - name: Deploy to kubernetes
         run: |

@@ -50,7 +52,7 @@ jobs:
           sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
           sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
           sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
-          bash kube-install.sh --app utilities
+          bash kube-install.sh --app peers
         env:
           DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
.github/workflows/sourcemaps-reader.yaml (vendored, 4 lines changed)

@@ -5,7 +5,9 @@ on:
     branches:
       - dev
     paths:
-      - sourcemap-reader/**
+      - "sourcemap-reader/**"
+      - "!sourcemap-reader/.gitignore"
+      - "!sourcemap-reader/*-dev.sh"

 name: Build and Deploy sourcemap-reader
@@ -34,7 +34,7 @@
 OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket.

 - **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more.
-- **Low footprint**. With a ~18KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance.
+- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance.
 - **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data.
 - **Privacy controls**. Fine-grained security features for sanitizing user data.
 - **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean).
@@ -53,3 +53,10 @@ async def stop_server():
     await shutdown()
     import os, signal
     os.kill(1, signal.SIGTERM)
+
+
+if config("LOCAL_DEV", default=False, cast=bool):
+    @app.get('/private/trigger', tags=["private"])
+    async def trigger_main_cron():
+        logging.info("Triggering main cron")
+        alerts_processor.process()
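The new /private/trigger route only exists when the app runs with LOCAL_DEV enabled, so the cron body can be exercised on demand during development. A usage sketch (host and port depend on how uvicorn was started; 8000 is just the common default, not something the diff specifies):

# Kick the alerts cron manually against a local dev server
curl http://localhost:8000/private/trigger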
@@ -49,10 +49,12 @@ LeftToDb = {
     schemas.AlertColumn.errors__4xx_5xx__count: {
         "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
         "condition": "status/100!=2"},
-    schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
-                                             "formula": "COUNT(session_id)", "condition": "status/100=4"},
-    schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
-                                             "formula": "COUNT(session_id)", "condition": "status/100=5"},
+    schemas.AlertColumn.errors__4xx__count: {
+        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
+        "formula": "COUNT(session_id)", "condition": "status/100=4"},
+    schemas.AlertColumn.errors__5xx__count: {
+        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
+        "formula": "COUNT(session_id)", "condition": "status/100=5"},
     schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
         "table": "events.resources INNER JOIN public.sessions USING(session_id)",
         "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},

@@ -95,7 +97,7 @@ def can_check(a) -> bool:
         a["options"].get("lastNotification") is None or
         a["options"]["lastNotification"] <= 0 or
         ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
-           and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
+        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


 def Build(a):

@@ -119,7 +121,7 @@ def Build(a):
     subQ = f"""SELECT {colDef["formula"]} AS value
                FROM {colDef["table"]}
                WHERE project_id = %(project_id)s
-                     {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
+                     {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
     j_s = colDef.get("joinSessions", True)
     main_table = colDef["table"]
     is_ss = main_table == "public.sessions"

@@ -142,8 +144,7 @@ def Build(a):
                   "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                   "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
     else:
-        sub1 = f"""{subQ} AND timestamp>=%(startDate)s
-                          AND timestamp<=%(now)s
+        sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                           {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
         params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
         sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}

@@ -206,7 +207,7 @@ def process():
             cur = cur.recreate(rollback=True)
     if len(notifications) > 0:
         cur.execute(
-            cur.mogrify(f"""UPDATE public.Alerts
+            cur.mogrify(f"""UPDATE public.alerts
                             SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                             WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
     if len(notifications) > 0:
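The modulus test in can_check above is what makes an alert fire once per repetition window: the alert is considered due when "now" falls inside the first minute of its current window. A toy check of the arithmetic (all values in milliseconds; the numbers are made up):

# createdAt was 3 hours and 30 seconds ago; repetition base is hourly.
now=1700000000000
created_at=$(( now - 3 * 60 * 60 * 1000 - 30000 ))
interval_min=60
# (now - created_at) mod one hour leaves 30s, which is < 60s, so it fires.
if (( (now - created_at) % (interval_min * 60 * 1000) < 60 * 1000 )); then
  echo "alert is due this minute"
fi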
@@ -114,17 +114,19 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
         row = cur.fetchone()
         offset = row["count"]
         pg_query = f"""UPDATE dashboards
                        SET name = %(name)s,
                            description = %(description)s
                            {", is_public = %(is_public)s" if data.is_public is not None else ""}
                            {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
                        WHERE dashboards.project_id = %(projectId)s
                          AND dashboard_id = %(dashboard_id)s
-                         AND (dashboards.user_id = %(userId)s OR is_public)"""
+                         AND (dashboards.user_id = %(userId)s OR is_public)
+                       RETURNING dashboard_id,name,description,is_public,created_at;"""
         if data.metrics is not None and len(data.metrics) > 0:
             pg_query = f"""WITH dash AS ({pg_query})
                            INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
-                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};"""
+                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
+                           RETURNING dash.*;"""
             for i, m in enumerate(data.metrics):
                 params[f"metric_id_{i}"] = m
                 # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \

@@ -134,8 +136,10 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
             params[f"config_{i}"] = json.dumps({"position": i + offset})

         cur.execute(cur.mogrify(pg_query, params))
-
-        return {"success": True}
+        row = cur.fetchone()
+        if row:
+            row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
+    return helper.dict_to_camel_case(row)


 def get_widget(project_id, user_id, dashboard_id, widget_id):
@@ -19,13 +19,13 @@ def __exists_by_name(project_id: int, name: str, exclude_index: Optional[int]) -
         constraints = column_names()
         if exclude_index:
             del constraints[exclude_index - 1]
-        for c in constraints:
-            c += " ILIKE %(name)s"
+        for i in range(len(constraints)):
+            constraints[i] += " ILIKE %(name)s"
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
                                               FROM public.projects
                                               WHERE project_id = %(project_id)s
                                                 AND deleted_at ISNULL
                                                 AND ({" OR ".join(constraints)})) AS exists;""",
                             {"project_id": project_id, "name": name})
         cur.execute(query=query)
         row = cur.fetchone()
@@ -13,10 +13,10 @@ from chalicelib.utils.TimeUTC import TimeUTC
 def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool:
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
                                               FROM public.projects
                                               WHERE deleted_at IS NULL
                                                 AND name ILIKE %(name)s
-                                                {"AND project_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                                {"AND project_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                             {"name": name, "exclude_id": exclude_id})

         cur.execute(query=query)
@@ -301,7 +301,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
         extra_col = ""
         extra_where = ""
         pre_query = ""
-        distinct_on="s.session_id"
+        distinct_on = "s.session_id"
         if metric_of == schemas.MetricOfTable.user_country:
             main_col = "user_country"
         elif metric_of == schemas.MetricOfTable.user_device:

@@ -321,7 +321,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
         elif metric_of == schemas.MetricOfTable.visited_url:
             main_col = "path"
             extra_col = ", path"
-            distinct_on+=",path"
+            distinct_on += ",path"
         main_query = cur.mogrify(f"""{pre_query}
                                      SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values
                                      FROM (SELECT {main_col} AS name,
@@ -1194,8 +1194,9 @@ def delete_sessions_by_user_ids(project_id, user_ids):

 def count_all():
     with pg_client.PostgresClient(unlimited_query=True) as cur:
-        row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
-        return row.get("count", 0)
+        cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
+        row = cur.fetchone()
+        return row.get("count", 0) if row else 0


 def session_exists(project_id, session_id):

@@ -1203,7 +1204,8 @@ def session_exists(project_id, session_id):
         query = cur.mogrify("""SELECT 1
                                FROM public.sessions
                                WHERE session_id=%(session_id)s
-                                 AND project_id=%(project_id)s""",
+                                 AND project_id=%(project_id)s
+                               LIMIT 1;""",
                             {"project_id": project_id, "session_id": session_id})
         cur.execute(query)
         row = cur.fetchone()
@@ -514,14 +514,6 @@ def set_password_invitation(user_id, new_password):
     }


-def count_members():
-    with pg_client.PostgresClient() as cur:
-        cur.execute("""SELECT COUNT(user_id)
-                       FROM public.users WHERE deleted_at IS NULL;""")
-        r = cur.fetchone()
-        return r["count"]
-
-
 def email_exists(email):
     with pg_client.PostgresClient() as cur:
         cur.execute(
@@ -110,11 +110,11 @@ def exists_by_name(name: str, exclude_id: Optional[int], webhook_type: str = schemas
                    tenant_id: Optional[int] = None) -> bool:
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
                                               FROM public.webhooks
                                               WHERE name ILIKE %(name)s
                                                 AND deleted_at ISNULL
                                                 AND type=%(webhook_type)s
-                                                {"AND webhook_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                                {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                             {"name": name, "exclude_id": exclude_id, "webhook_type": webhook_type})
         cur.execute(query)
         row = cur.fetchone()
@@ -283,6 +283,7 @@ def custom_alert_to_front(values):
     # to support frontend format for payload
     if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom:
         values["query"]["left"] = values["seriesId"]
+        values["seriesId"] = None
     return values
@@ -55,7 +55,7 @@ def get_presigned_url_for_sharing(bucket, expires_in, key, check_exists=False):
     )


-def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args):
+def get_presigned_url_for_upload(bucket, expires_in, key, **args):
     return client.generate_presigned_url(
         'put_object',
         Params={

@@ -66,10 +66,7 @@ def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args):
     )


-
-
-
-def get_presigned_url_for_upload(bucket, expires_in, key, conditions=None, public=False, content_type=None):
+def get_presigned_url_for_upload_secure(bucket, expires_in, key, conditions=None, public=False, content_type=None):
     acl = 'private'
     if public:
         acl = 'public-read'
api/run-alerts-dev.sh (executable, new file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/zsh

uvicorn app_alerts:app --reload
@@ -363,7 +363,8 @@ class AlertSchema(BaseModel):

     @root_validator(pre=True)
     def transform_alert(cls, values):
-        if values.get("seriesId") is None and isinstance(values["query"]["left"], int):
+        values["seriesId"] = None
+        if isinstance(values["query"]["left"], int):
             values["seriesId"] = values["query"]["left"]
             values["query"]["left"] = AlertColumn.custom
@@ -1,9 +1,7 @@
 package main

 import (
-	"context"
 	"log"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"syscall"

@@ -13,12 +11,16 @@ import (
 	"openreplay/backend/internal/assets/cacher"
 	config "openreplay/backend/internal/config/assets"
 	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics"
+	assetsMetrics "openreplay/backend/pkg/metrics/assets"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 )

 func main() {
-	metrics := monitoring.New("assets")
+	m := metrics.New()
+	m.Register(assetsMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := config.New()

@@ -26,18 +28,13 @@ func main() {
 		pprof.StartProfilingServer()
 	}

-	cacher := cacher.NewCacher(cfg, metrics)
-
-	totalAssets, err := metrics.RegisterCounter("assets_total")
-	if err != nil {
-		log.Printf("can't create assets_total metric: %s", err)
-	}
+	cacher := cacher.NewCacher(cfg)

 	msgHandler := func(msg messages.Message) {
 		switch m := msg.(type) {
 		case *messages.AssetCache:
 			cacher.CacheURL(m.SessionID(), m.URL)
-			totalAssets.Add(context.Background(), 1)
+			assetsMetrics.IncreaseProcessesSessions()
 			// TODO: connect to "raw" topic in order to listen for JSException
 		case *messages.JSException:
 			sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
@@ -3,8 +3,6 @@ package main
 import (
 	"errors"
 	"log"
-	types2 "openreplay/backend/pkg/db/types"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"syscall"

@@ -14,16 +12,21 @@ import (
 	"openreplay/backend/internal/db/datasaver"
 	"openreplay/backend/pkg/db/cache"
 	"openreplay/backend/pkg/db/postgres"
+	types2 "openreplay/backend/pkg/db/types"
 	"openreplay/backend/pkg/handlers"
 	custom2 "openreplay/backend/pkg/handlers/custom"
 	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics"
+	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/sessions"
 )

 func main() {
-	metrics := monitoring.New("db")
+	m := metrics.New()
+	m.Register(databaseMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := db.New()

@@ -33,7 +36,7 @@ func main() {

 	// Init database
 	pg := cache.NewPGCache(
-		postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
+		postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs)
 	defer pg.Close()

 	// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
@@ -2,8 +2,6 @@ package main

 import (
 	"log"
-	"openreplay/backend/internal/storage"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"strings"

@@ -12,16 +10,23 @@ import (

 	"openreplay/backend/internal/config/ender"
 	"openreplay/backend/internal/sessionender"
+	"openreplay/backend/internal/storage"
 	"openreplay/backend/pkg/db/cache"
 	"openreplay/backend/pkg/db/postgres"
 	"openreplay/backend/pkg/intervals"
 	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics"
+	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	enderMetrics "openreplay/backend/pkg/metrics/ender"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 )

 func main() {
-	metrics := monitoring.New("ender")
+	m := metrics.New()
+	m.Register(enderMetrics.List())
+	m.Register(databaseMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := ender.New()

@@ -29,10 +34,10 @@ func main() {
 		pprof.StartProfilingServer()
 	}

-	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
+	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs)
 	defer pg.Close()

-	sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
+	sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
 	if err != nil {
 		log.Printf("can't init ender service: %s", err)
 		return
@@ -2,23 +2,28 @@ package main

 import (
 	"log"
-	"openreplay/backend/internal/config/http"
-	"openreplay/backend/internal/http/router"
-	"openreplay/backend/internal/http/server"
-	"openreplay/backend/internal/http/services"
-	"openreplay/backend/pkg/monitoring"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"syscall"

+	"openreplay/backend/internal/config/http"
+	"openreplay/backend/internal/http/router"
+	"openreplay/backend/internal/http/server"
+	"openreplay/backend/internal/http/services"
+	"openreplay/backend/pkg/db/cache"
+	"openreplay/backend/pkg/db/postgres"
+	"openreplay/backend/pkg/metrics"
+	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	httpMetrics "openreplay/backend/pkg/metrics/http"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 )

 func main() {
-	metrics := monitoring.New("http")
+	m := metrics.New()
+	m.Register(httpMetrics.List())
+	m.Register(databaseMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := http.New()

@@ -31,14 +36,14 @@ func main() {
 	defer producer.Close(15000)

 	// Connect to database
-	dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20)
+	dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20)
 	defer dbConn.Close()

 	// Build all services
 	services := services.New(cfg, producer, dbConn)

 	// Init server's routes
-	router, err := router.NewRouter(cfg, services, metrics)
+	router, err := router.NewRouter(cfg, services)
 	if err != nil {
 		log.Fatalf("failed while creating engine: %s", err)
 	}
@@ -2,24 +2,26 @@ package main

 import (
 	"log"
-	config "openreplay/backend/internal/config/integrations"
-	"openreplay/backend/internal/integrations/clientManager"
-	"openreplay/backend/pkg/monitoring"
-	"openreplay/backend/pkg/pprof"
-	"time"
-
 	"os"
 	"os/signal"
 	"syscall"
+	"time"

+	config "openreplay/backend/internal/config/integrations"
+	"openreplay/backend/internal/integrations/clientManager"
+	"openreplay/backend/pkg/db/postgres"
 	"openreplay/backend/pkg/intervals"
+	"openreplay/backend/pkg/metrics"
+	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/token"
 )

 func main() {
-	metrics := monitoring.New("integrations")
+	m := metrics.New()
+	m.Register(databaseMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := config.New()

@@ -27,7 +29,7 @@ func main() {
 		pprof.StartProfilingServer()
 	}

-	pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics)
+	pg := postgres.NewConn(cfg.Postgres.String(), 0, 0)
 	defer pg.Close()

 	tokenizer := token.NewTokenizer(cfg.TokenSecret)
@@ -2,10 +2,8 @@ package main

 import (
 	"bytes"
-	"context"
 	"encoding/binary"
 	"log"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"syscall"
@@ -16,13 +14,16 @@ import (
 	"openreplay/backend/internal/sink/sessionwriter"
 	"openreplay/backend/internal/storage"
 	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics"
+	sinkMetrics "openreplay/backend/pkg/metrics/sink"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/url/assets"
 )

 func main() {
-	metrics := monitoring.New("sink")
+	m := metrics.New()
+	m.Register(sinkMetrics.List())
 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := sink.New()
@@ -39,22 +40,8 @@ func main() {
 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	defer producer.Close(cfg.ProducerCloseTimeout)
 	rewriter := assets.NewRewriter(cfg.AssetsOrigin)
-	assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics)
-
+	assetMessageHandler := assetscache.New(cfg, rewriter, producer)
 	counter := storage.NewLogCounter()
-	// Session message metrics
-	totalMessages, err := metrics.RegisterCounter("messages_total")
-	if err != nil {
-		log.Printf("can't create messages_total metric: %s", err)
-	}
-	savedMessages, err := metrics.RegisterCounter("messages_saved")
-	if err != nil {
-		log.Printf("can't create messages_saved metric: %s", err)
-	}
-	messageSize, err := metrics.RegisterHistogram("messages_size")
-	if err != nil {
-		log.Printf("can't create messages_size metric: %s", err)
-	}

 	var (
 		sessionID uint64
@@ -74,11 +61,12 @@ func main() {
 		if domBuffer.Len() <= 0 && devBuffer.Len() <= 0 {
 			return
 		}
+		sinkMetrics.RecordWrittenBytes(float64(domBuffer.Len()), "dom")
+		sinkMetrics.RecordWrittenBytes(float64(devBuffer.Len()), "devtools")

 		// Write buffered batches to the session
 		if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil {
 			log.Printf("writer error: %s", err)
 			return
 		}

 		// Prepare buffer for the next batch
@@ -88,8 +76,7 @@ func main() {
 			return
 		}

 		// [METRICS] Increase the number of processed messages
-		totalMessages.Add(context.Background(), 1)
+		sinkMetrics.IncreaseTotalMessages()

 		// Send SessionEnd trigger to storage service
 		if msg.TypeID() == messages.MsgSessionEnd {
@@ -187,9 +174,8 @@ func main() {
 			}
 		}

 		// [METRICS] Increase the number of written to the files messages and the message size
-		messageSize.Record(context.Background(), float64(len(msg.Encode())))
-		savedMessages.Add(context.Background(), 1)
+		sinkMetrics.IncreaseWrittenMessages()
+		sinkMetrics.RecordMessageSize(float64(len(msg.Encode())))
 	}

 	consumer := queue.NewConsumer(

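The sink hunks show the shape of the new API in a hot loop: no named otel instruments to thread around, just package-level calls. A sketch of the per-message accounting, assuming the pkg/metrics/sink functions exactly as they appear in this diff (the session-writing logic itself is elided):

package main

import (
	"openreplay/backend/pkg/messages"
	sinkMetrics "openreplay/backend/pkg/metrics/sink"
)

// handleMessage condenses the per-message accounting from the loop above.
func handleMessage(msg messages.Message) {
	sinkMetrics.IncreaseTotalMessages() // every message that reaches the sink
	// ... write the message to the session files ...
	sinkMetrics.IncreaseWrittenMessages()                     // only messages written successfully
	sinkMetrics.RecordMessageSize(float64(len(msg.Encode()))) // histogram of encoded sizes
}
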
@@ -2,7 +2,6 @@ package main

 import (
 	"log"
-	"openreplay/backend/pkg/pprof"
 	"os"
 	"os/signal"
 	"syscall"
@@ -12,13 +11,17 @@ import (
 	"openreplay/backend/internal/storage"
 	"openreplay/backend/pkg/failover"
 	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics"
+	storageMetrics "openreplay/backend/pkg/metrics/storage"
+	"openreplay/backend/pkg/pprof"
 	"openreplay/backend/pkg/queue"
-	s3storage "openreplay/backend/pkg/storage"
+	cloud "openreplay/backend/pkg/storage"
 )

 func main() {
-	metrics := monitoring.New("storage")
+	m := metrics.New()
+	m.Register(storageMetrics.List())

 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

 	cfg := config.New()
@@ -26,8 +29,8 @@ func main() {
 		pprof.StartProfilingServer()
 	}

-	s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
-	srv, err := storage.New(cfg, s3, metrics)
+	s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket)
+	srv, err := storage.New(cfg, s3)
 	if err != nil {
 		log.Printf("can't init storage service: %s", err)
 		return

@@ -20,14 +20,11 @@ require (
 	github.com/klauspost/pgzip v1.2.5
 	github.com/oschwald/maxminddb-golang v1.7.0
 	github.com/pkg/errors v0.9.1
+	github.com/prometheus/client_golang v1.12.1
 	github.com/sethvargo/go-envconfig v0.7.0
 	github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
 	github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
-	go.opentelemetry.io/otel v1.7.0
-	go.opentelemetry.io/otel/exporters/prometheus v0.30.0
-	go.opentelemetry.io/otel/metric v0.30.0
-	go.opentelemetry.io/otel/sdk/metric v0.30.0
-	golang.org/x/net v0.0.0-20220906165146-f3363e06e74c
+	golang.org/x/net v0.1.1-0.20221104162952-702349b0e862
 	google.golang.org/api v0.81.0
 )
@@ -38,8 +35,6 @@ require (
 	cloud.google.com/go/storage v1.14.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.8 // indirect
@@ -55,20 +50,19 @@ require (
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/paulmach/orb v0.7.1 // indirect
 	github.com/pierrec/lz4/v4 v4.1.15 // indirect
-	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
 	github.com/stretchr/testify v1.8.0 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.7.0 // indirect
+	go.opentelemetry.io/otel v1.7.0 // indirect
 	go.opentelemetry.io/otel/trace v1.7.0 // indirect
 	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
 	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
 	golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
-	golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
-	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/sys v0.1.0 // indirect
+	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect

@@ -80,8 +80,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/aws/aws-sdk-go v1.44.98 h1:fX+NxebSdO/9T6DTNOLhpC+Vv6RNkKRfsMg0a7o/yBo=
 github.com/aws/aws-sdk-go v1.44.98/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -156,9 +154,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -489,14 +485,6 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
 go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
-go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
-go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
-go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
-go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
-go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
-go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
-go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
-go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
 go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
 go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
@@ -601,8 +589,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
-golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 h1:KrLJ+iz8J6j6VVr/OCfULAcK+xozUmWE43fKpMR4MlI=
+golang.org/x/net v0.1.1-0.20221104162952-702349b0e862/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -690,7 +678,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -715,8 +702,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -728,8 +715,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@@ -1,16 +1,13 @@
 package cacher

 import (
-	"context"
 	"crypto/tls"
 	"fmt"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
 	"io"
 	"io/ioutil"
 	"log"
 	"mime"
 	"net/http"
-	"openreplay/backend/pkg/monitoring"
+	metrics "openreplay/backend/pkg/metrics/assets"
 	"path/filepath"
 	"strings"
 	"time"
@@ -25,30 +22,22 @@ import (
 const MAX_CACHE_DEPTH = 5

 type cacher struct {
-	timeoutMap       *timeoutMap       // Concurrency implemented
-	s3               *storage.S3       // AWS Docs: "These clients are safe to use concurrently."
-	httpClient       *http.Client      // Docs: "Clients are safe for concurrent use by multiple goroutines."
-	rewriter         *assets.Rewriter  // Read only
-	Errors           chan error
-	sizeLimit        int
-	downloadedAssets syncfloat64.Counter
-	requestHeaders   map[string]string
-	workers          *WorkerPool
+	timeoutMap     *timeoutMap       // Concurrency implemented
+	s3             *storage.S3       // AWS Docs: "These clients are safe to use concurrently."
+	httpClient     *http.Client      // Docs: "Clients are safe for concurrent use by multiple goroutines."
+	rewriter       *assets.Rewriter  // Read only
+	Errors         chan error
+	sizeLimit      int
+	requestHeaders map[string]string
+	workers        *WorkerPool
 }

 func (c *cacher) CanCache() bool {
 	return c.workers.CanAddTask()
 }

-func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
+func NewCacher(cfg *config.Config) *cacher {
 	rewriter := assets.NewRewriter(cfg.AssetsOrigin)
-	if metrics == nil {
-		log.Fatalf("metrics are empty")
-	}
-	downloadedAssets, err := metrics.RegisterCounter("assets_downloaded")
-	if err != nil {
-		log.Printf("can't create downloaded_assets metric: %s", err)
-	}
 	c := &cacher{
 		timeoutMap: newTimeoutMap(),
 		s3:         storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
@@ -59,11 +48,10 @@ func NewCacher(cfg *config.Config) *cacher {
 				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
 			},
 		},
-		rewriter:         rewriter,
-		Errors:           make(chan error),
-		sizeLimit:        cfg.AssetsSizeLimit,
-		downloadedAssets: downloadedAssets,
-		requestHeaders:   cfg.AssetsRequestHeaders,
+		rewriter:       rewriter,
+		Errors:         make(chan error),
+		sizeLimit:      cfg.AssetsSizeLimit,
+		requestHeaders: cfg.AssetsRequestHeaders,
 	}
 	c.workers = NewPool(64, c.CacheFile)
 	return c
@@ -75,6 +63,7 @@ func (c *cacher) CacheFile(task *Task) {

 func (c *cacher) cacheURL(t *Task) {
 	t.retries--
+	start := time.Now()
 	req, _ := http.NewRequest("GET", t.requestURL, nil)
 	if t.retries%2 == 0 {
 		req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
@@ -87,6 +76,7 @@ func (c *cacher) cacheURL(t *Task) {
 		c.Errors <- errors.Wrap(err, t.urlContext)
 		return
 	}
+	metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
 	defer res.Body.Close()
 	if res.StatusCode >= 400 {
 		printErr := true
@@ -122,12 +112,15 @@ func (c *cacher) cacheURL(t *Task) {
 	}

 	// TODO: implement in streams
+	start = time.Now()
 	err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false)
 	if err != nil {
+		metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
 		c.Errors <- errors.Wrap(err, t.urlContext)
 		return
 	}
-	c.downloadedAssets.Add(context.Background(), 1)
+	metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
+	metrics.IncreaseSavedSessions()

 	if isCSS {
 		if t.depth > 0 {

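The cacher now times its own downloads and uploads and reports them through pkg/metrics/assets. A minimal sketch of the download-timing pattern added above, assuming the RecordDownloadDuration signature exactly as the diff shows it; the helper name is hypothetical, since the real code inlines these lines in cacheURL:

package cacher

import (
	"net/http"
	"time"

	metrics "openreplay/backend/pkg/metrics/assets"
)

// doTimed measures one HTTP request and records its duration,
// labeled by the response status code.
func doTimed(client *http.Client, req *http.Request) (*http.Response, error) {
	start := time.Now()
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	metrics.RecordDownloadDuration(float64(time.Since(start).Milliseconds()), res.StatusCode)
	return res, nil
}
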
@@ -22,28 +22,28 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 	req := &StartIOSSessionRequest{}

 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
+		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
 		return
 	}
 	body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
 	defer body.Close()

 	if err := json.NewDecoder(body).Decode(req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err)
+		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
 		return
 	}

 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
+		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0)
 		return
 	}

 	p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
 	if err != nil {
 		if postgres.IsNoRowsErr(err) {
-			ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
+			ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0)
 		} else {
-			ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
+			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
 		}
 		return
 	}
@@ -53,18 +53,18 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 	if err != nil { // Starting the new one
 		dice := byte(rand.Intn(100)) // [0, 100)
 		if dice >= p.SampleRate {
-			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
+			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0)
 			return
 		}

 		ua := e.services.UaParser.ParseFromHTTPRequest(r)
 		if ua == nil {
-			ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
+			ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0)
 			return
 		}
 		sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
 		if err != nil {
-			ResponseWithError(w, http.StatusInternalServerError, err)
+			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
 			return
 		}
 		// TODO: if EXPIRED => send message for two sessions association
@@ -94,22 +94,24 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 		UserUUID:        userUUID,
 		SessionID:       strconv.FormatUint(tokenData.ID, 10),
 		BeaconSizeLimit: e.cfg.BeaconSizeLimit,
-	})
+	}, startTime, r.URL.Path, 0)
 }

 func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
+	startTime := time.Now()
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err)
+		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}
 	e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
 }

 func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
+	startTime := time.Now()
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil && err != token.EXPIRED {
-		ResponseWithError(w, http.StatusUnauthorized, err)
+		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}
 	// Check timestamps here?
@@ -117,16 +119,17 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque
 }

 func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
+	startTime := time.Now()
 	log.Printf("recieved imagerequest")

 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil { // Should accept expired token?
-		ResponseWithError(w, http.StatusUnauthorized, err)
+		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}

 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
+		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
 		return
 	}
 	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
@@ -134,21 +137,21 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)

 	err = r.ParseMultipartForm(1e6) // ~1Mb
 	if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
-		ResponseWithError(w, http.StatusUnsupportedMediaType, err)
+		ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
 		return
 		// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
 	} else if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
+		ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
 		return
 	}

 	if r.MultipartForm == nil {
-		ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
+		ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0)
 		return
 	}

 	if len(r.MultipartForm.Value["projectKey"]) == 0 {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter?
+		ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
 		return
 	}

@@ -3,18 +3,17 @@ package router
 import (
 	"encoding/json"
 	"errors"
-	"github.com/Masterminds/semver"
-	"go.opentelemetry.io/otel/attribute"
 	"io"
 	"log"
 	"math/rand"
 	"net/http"
-	"openreplay/backend/internal/http/uuid"
-	"openreplay/backend/pkg/flakeid"
 	"strconv"
 	"time"

+	"github.com/Masterminds/semver"
+	"openreplay/backend/internal/http/uuid"
 	"openreplay/backend/pkg/db/postgres"
+	"openreplay/backend/pkg/flakeid"
 	. "openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/token"
 )
@@ -28,13 +27,6 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
 	if err != nil {
 		return nil, err
 	}
-
-	reqSize := len(bodyBytes)
-	e.requestSize.Record(
-		r.Context(),
-		float64(reqSize),
-		[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
-	)
 	return bodyBytes, nil
 }
@@ -56,40 +48,43 @@ func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint

 func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
 	startTime := time.Now()
+	bodySize := 0

 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
+		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}

 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
 		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
+		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
+	bodySize = len(bodyBytes)

 	// Parse request body
 	req := &StartSessionRequest{}
 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err)
+		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}

 	// Handler's logic
 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
+		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
 		return
 	}

 	p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
 	if err != nil {
 		if postgres.IsNoRowsErr(err) {
-			ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached"))
+			ResponseWithError(w, http.StatusNotFound,
+				errors.New("project doesn't exist or capture limit has been reached"), startTime, r.URL.Path, bodySize)
 		} else {
 			log.Printf("can't get project by key: %s", err)
-			ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"))
+			ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize)
 		}
 		return
 	}
@@ -99,19 +94,19 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 	if err != nil || req.Reset { // Starting the new one
 		dice := byte(rand.Intn(100)) // [0, 100)
 		if dice >= p.SampleRate {
-			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
+			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize)
 			return
 		}

 		ua := e.services.UaParser.ParseFromHTTPRequest(r)
 		if ua == nil {
-			ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
+			ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
 			return
 		}
 		startTimeMili := startTime.UnixMilli()
 		sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
 		if err != nil {
-			ResponseWithError(w, http.StatusInternalServerError, err)
+			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
 			return
 		}
 		// TODO: if EXPIRED => send message for two sessions association
@@ -163,29 +158,33 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 		BeaconSizeLimit: e.getBeaconSize(tokenData.ID),
 		StartTimestamp:  int64(flakeid.ExtractTimestamp(tokenData.ID)),
 		Delay:           tokenData.Delay,
-	})
+	}, startTime, r.URL.Path, bodySize)
 }

 func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
+	startTime := time.Now()
+	bodySize := 0

 	// Check authorization
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err)
+		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
 		return
 	}

 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
+		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}

 	bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID))
 	if err != nil {
 		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
+		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
+	bodySize = len(bodyBytes)

 	// Send processed messages to queue as array of bytes
 	// TODO: check bytes for nonsense crap
@@ -194,39 +193,43 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request)
 		log.Printf("can't send processed messages to queue: %s", err)
 	}

-	w.WriteHeader(http.StatusOK)
+	ResponseOK(w, startTime, r.URL.Path, bodySize)
 }

 func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
+	startTime := time.Now()
+	bodySize := 0

 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
+		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}

 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
 		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
+		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
+	bodySize = len(bodyBytes)

 	// Parse request body
 	req := &NotStartedRequest{}

 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err)
+		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}

 	// Handler's logic
 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"))
+		ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize)
 		return
 	}
 	ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
 	if ua == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
+		ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
 		return
 	}
 	country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
@@ -248,5 +251,5 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
 		log.Printf("Unable to insert Unstarted Session: %v\n", err)
 	}

-	w.WriteHeader(http.StatusOK)
+	ResponseOK(w, startTime, r.URL.Path, bodySize)
 }

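Every handler in the hunks above follows the same discipline: capture startTime and bodySize before any early return, then pass both through each response helper so the request's real duration and size reach recordMetrics. A condensed sketch of that flow, using only the signatures visible in this diff (the skeleton name is hypothetical; real handlers inline it):

package router

import (
	"errors"
	"net/http"
	"time"
)

// handlerSkeleton condenses the startTime/bodySize threading pattern.
func (e *Router) handlerSkeleton(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0

	if r.Body == nil {
		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes) // from here on, errors report the true request size

	// ... handler-specific logic on bodyBytes ...
	ResponseOK(w, startTime, r.URL.Path, bodySize)
}
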
@@ -6,9 +6,11 @@ import (
 	"io/ioutil"
 	"log"
 	"net/http"
+	"time"
 )

 func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
+	start := time.Now()
 	body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
 	defer body.Close()
@@ -21,7 +23,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID
 	reader, err = gzip.NewReader(body)
 	if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response
+		ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response
 		return
 	}
 	//log.Println("Gzip reader init", reader)
@@ -32,7 +34,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID
 	//log.Println("Reader after switch:", reader)
 	buf, err := ioutil.ReadAll(reader)
 	if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
+		ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging
 		return
 	}
 	e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?

@@ -4,21 +4,44 @@ import (
 	"encoding/json"
 	"log"
 	"net/http"
+	"time"
+
+	metrics "openreplay/backend/pkg/metrics/http"
 )

-func ResponseWithJSON(w http.ResponseWriter, res interface{}) {
+func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
+	if bodySize > 0 {
+		metrics.RecordRequestSize(float64(bodySize), url, code)
+	}
+	metrics.IncreaseTotalRequests()
+	metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code)
+}
+
+func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
+	w.WriteHeader(http.StatusOK)
+	recordMetrics(requestStart, url, http.StatusOK, bodySize)
+}
+
+func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
 	body, err := json.Marshal(res)
 	if err != nil {
 		log.Println(err)
 	}
 	w.Header().Set("Content-Type", "application/json")
 	w.Write(body)
+	recordMetrics(requestStart, url, http.StatusOK, bodySize)
 }

-func ResponseWithError(w http.ResponseWriter, code int, err error) {
-	type response struct {
-		Error string `json:"error"`
+type response struct {
+	Error string `json:"error"`
 }

+func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
+	body, err := json.Marshal(&response{err.Error()})
+	if err != nil {
+		log.Println(err)
+	}
 	w.WriteHeader(code)
-	ResponseWithJSON(w, &response{err.Error()})
+	w.Write(body)
+	recordMetrics(requestStart, url, code, bodySize)
 }

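All request accounting now funnels through a single recordMetrics call, so extending the helpers is a one-liner per new response path. A hypothetical example, not part of the diff, assuming only the recordMetrics signature shown above:

package router

import (
	"net/http"
	"time"
)

// ResponseNoContent sketches a new response path: one extra call keeps it
// covered by the request size/duration/count metrics.
func ResponseNoContent(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
	w.WriteHeader(http.StatusNoContent)
	recordMetrics(requestStart, url, http.StatusNoContent, bodySize)
}
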
@@ -1,19 +1,16 @@
 package router

 import (
-	"context"
 	"fmt"
-	"github.com/gorilla/mux"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
 	"log"
 	"net/http"
+	"sync"
+	"time"

+	"github.com/gorilla/mux"
 	http3 "openreplay/backend/internal/config/http"
 	http2 "openreplay/backend/internal/http/services"
 	"openreplay/backend/internal/http/util"
-	"openreplay/backend/pkg/monitoring"
-	"sync"
-	"time"
 )

 type BeaconSize struct {
@@ -25,21 +22,16 @@ type Router struct {
 	router          *mux.Router
 	cfg             *http3.Config
 	services        *http2.ServicesBuilder
-	requestSize     syncfloat64.Histogram
-	requestDuration syncfloat64.Histogram
-	totalRequests   syncfloat64.Counter
 	mutex           *sync.RWMutex
 	beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize
 }

-func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) {
+func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) {
 	switch {
 	case cfg == nil:
 		return nil, fmt.Errorf("config is empty")
 	case services == nil:
 		return nil, fmt.Errorf("services is empty")
-	case metrics == nil:
-		return nil, fmt.Errorf("metrics is empty")
 	}
 	e := &Router{
 		cfg: cfg,
@@ -47,7 +39,6 @@ func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, err
 		mutex:           &sync.RWMutex{},
 		beaconSizeCache: make(map[uint64]*BeaconSize),
 	}
-	e.initMetrics(metrics)
 	e.init()
 	go e.clearBeaconSizes()
 	return e, nil
@@ -115,22 +106,6 @@ func (e *Router) init() {
 	e.router.Use(e.corsMiddleware)
 }

-func (e *Router) initMetrics(metrics *monitoring.Metrics) {
-	var err error
-	e.requestSize, err = metrics.RegisterHistogram("requests_body_size")
-	if err != nil {
-		log.Printf("can't create requests_body_size metric: %s", err)
-	}
-	e.requestDuration, err = metrics.RegisterHistogram("requests_duration")
-	if err != nil {
-		log.Printf("can't create requests_duration metric: %s", err)
-	}
-	e.totalRequests, err = metrics.RegisterCounter("requests_total")
-	if err != nil {
-		log.Printf("can't create requests_total metric: %s", err)
-	}
-}
-
 func (e *Router) root(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusOK)
 }
@@ -149,17 +124,8 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler {

 		log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path))

-		requestStart := time.Now()
-
 		// Serve request
 		next.ServeHTTP(w, r)
-
-		metricsContext, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
-		e.totalRequests.Add(metricsContext, 1)
-		e.requestDuration.Record(metricsContext,
-			float64(time.Now().Sub(requestStart).Milliseconds()),
-			[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
-		)
 	})
 }

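With initMetrics gone, the middleware no longer times requests at all; timing lives in the response helpers, which know the final status code and body size. A sketch of the middleware as it reads after this hunk, condensed and under the assumption that the CORS header handling (not shown here) stays as before:

package router

import (
	"log"
	"net/http"

	"openreplay/backend/internal/http/util"
)

// corsMiddlewareSketch: log and serve; no per-request instrument calls.
func corsMiddlewareSketch(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path))
		next.ServeHTTP(w, r)
	})
}
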
@@ -1,13 +1,11 @@
 package sessionender

 import (
-	"context"
 	"fmt"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
 	"log"
-	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
 	"time"

+	"openreplay/backend/pkg/messages"
+	"openreplay/backend/pkg/metrics/ender"
 )

 // EndedSessionHandler handler for ended sessions
@@ -23,32 +21,16 @@ type session struct {

 // SessionEnder updates timestamp of last message for each session
 type SessionEnder struct {
-	timeout        int64
-	sessions       map[uint64]*session // map[sessionID]session
-	timeCtrl       *timeController
-	activeSessions syncfloat64.UpDownCounter
-	totalSessions  syncfloat64.Counter
+	timeout  int64
+	sessions map[uint64]*session // map[sessionID]session
+	timeCtrl *timeController
 }

-func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) {
-	if metrics == nil {
-		return nil, fmt.Errorf("metrics module is empty")
-	}
-	activeSessions, err := metrics.RegisterUpDownCounter("sessions_active")
-	if err != nil {
-		return nil, fmt.Errorf("can't register session.active metric: %s", err)
-	}
-	totalSessions, err := metrics.RegisterCounter("sessions_total")
-	if err != nil {
-		return nil, fmt.Errorf("can't register session.total metric: %s", err)
-	}
-
+func New(timeout int64, parts int) (*SessionEnder, error) {
 	return &SessionEnder{
-		timeout:        timeout,
-		sessions:       make(map[uint64]*session),
-		timeCtrl:       NewTimeController(parts),
-		activeSessions: activeSessions,
-		totalSessions:  totalSessions,
+		timeout:  timeout,
+		sessions: make(map[uint64]*session),
+		timeCtrl: NewTimeController(parts),
 	}, nil
 }
@@ -74,8 +56,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
 			lastUserTime: msgTimestamp, // last timestamp from user's machine
 			isEnded:      false,
 		}
-		se.activeSessions.Add(context.Background(), 1)
-		se.totalSessions.Add(context.Background(), 1)
+		ender.IncreaseActiveSessions()
+		ender.IncreaseTotalSessions()
 		return
 	}
 	// Keep the highest user's timestamp for correct session duration value
@@ -100,7 +82,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
 			sess.isEnded = true
 			if handler(sessID, sess.lastUserTime) {
 				delete(se.sessions, sessID)
-				se.activeSessions.Add(context.Background(), -1)
+				ender.DecreaseActiveSessions()
+				ender.IncreaseClosedSessions()
 				removedSessions++
 			} else {
 				log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime)

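The otel up/down counter becomes an explicit gauge discipline: the active-sessions gauge goes up once per new session and comes back down exactly once when the session is handled, while the total and closed counters only ever grow. A sketch of that pairing, using the pkg/metrics/ender calls from the diff (the wrapper functions are hypothetical; the real code calls these inline):

package sessionender

import "openreplay/backend/pkg/metrics/ender"

func onSessionStarted() {
	ender.IncreaseActiveSessions() // gauge: +1 while the session is live
	ender.IncreaseTotalSessions()  // counter: never decremented
}

func onSessionClosed() {
	ender.DecreaseActiveSessions() // gauge back down exactly once
	ender.IncreaseClosedSessions()
}
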
@@ -1,20 +1,19 @@
 package assetscache

 import (
-	"context"
 	"crypto/md5"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
 	"io"
 	"log"
 	"net/url"
-	"openreplay/backend/internal/config/sink"
-	"openreplay/backend/pkg/messages"
-	"openreplay/backend/pkg/monitoring"
-	"openreplay/backend/pkg/queue/types"
-	"openreplay/backend/pkg/url/assets"
+	metrics "openreplay/backend/pkg/metrics/sink"
 	"strings"
 	"sync"
 	"time"

+	"openreplay/backend/internal/config/sink"
+	"openreplay/backend/pkg/messages"
+	"openreplay/backend/pkg/queue/types"
+	"openreplay/backend/pkg/url/assets"
 )

 type CachedAsset struct {
@@ -23,52 +22,21 @@ type CachedAsset struct {
 }

 type AssetsCache struct {
-	mutex         sync.RWMutex
-	cfg           *sink.Config
-	rewriter      *assets.Rewriter
-	producer      types.Producer
-	cache         map[string]*CachedAsset
-	blackList     []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
-	totalAssets   syncfloat64.Counter
-	cachedAssets  syncfloat64.Counter
-	skippedAssets syncfloat64.Counter
-	assetSize     syncfloat64.Histogram
-	assetDuration syncfloat64.Histogram
+	mutex     sync.RWMutex
+	cfg       *sink.Config
+	rewriter  *assets.Rewriter
+	producer  types.Producer
+	cache     map[string]*CachedAsset
+	blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
 }

-func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache {
-	// Assets metrics
-	totalAssets, err := metrics.RegisterCounter("assets_total")
-	if err != nil {
-		log.Printf("can't create assets_total metric: %s", err)
-	}
-	cachedAssets, err := metrics.RegisterCounter("assets_cached")
-	if err != nil {
-		log.Printf("can't create assets_cached metric: %s", err)
-	}
-	skippedAssets, err := metrics.RegisterCounter("assets_skipped")
-	if err != nil {
-		log.Printf("can't create assets_skipped metric: %s", err)
-	}
-	assetSize, err := metrics.RegisterHistogram("asset_size")
-	if err != nil {
-		log.Printf("can't create asset_size metric: %s", err)
-	}
-	assetDuration, err := metrics.RegisterHistogram("asset_duration")
-	if err != nil {
-		log.Printf("can't create asset_duration metric: %s", err)
-	}
+func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
 	assetsCache := &AssetsCache{
-		cfg:           cfg,
-		rewriter:      rewriter,
-		producer:      producer,
-		cache:         make(map[string]*CachedAsset, 64),
-		blackList:     make([]string, 0),
-		totalAssets:   totalAssets,
-		cachedAssets:  cachedAssets,
-		skippedAssets: skippedAssets,
-		assetSize:     assetSize,
-		assetDuration: assetDuration,
+		cfg:       cfg,
+		rewriter:  rewriter,
+		producer:  producer,
+		cache:     make(map[string]*CachedAsset, 64),
+		blackList: make([]string, 0),
 	}
 	// Parse black list for cache layer
 	if len(cfg.CacheBlackList) > 0 {
@@ -84,7 +52,7 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *
 }

 func (e *AssetsCache) cleaner() {
-	cleanTick := time.Tick(time.Minute * 30)
+	cleanTick := time.Tick(time.Minute * 3)
 	for {
 		select {
 		case <-cleanTick:
@@ -105,6 +73,7 @@ func (e *AssetsCache) clearCache() {
 		if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
 			deleted++
 			delete(e.cache, id)
+			metrics.DecreaseCachedAssets()
 		}
 	}
 	log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
@@ -232,8 +201,7 @@ func parseHost(baseURL string) (string, error) {
 }

 func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
-	ctx := context.Background()
-	e.totalAssets.Add(ctx, 1)
+	metrics.IncreaseTotalAssets()
 	// Try to find asset in cache
 	h := md5.New()
 	// Cut first part of url (scheme + host)
@@ -255,7 +223,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 	e.mutex.RUnlock()
 	if ok {
 		if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
-			e.skippedAssets.Add(ctx, 1)
+			metrics.IncreaseSkippedAssets()
 			return cachedAsset.msg
 		}
 	}
@@ -267,8 +235,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 	start := time.Now()
 	res := e.getRewrittenCSS(sessionID, baseURL, css)
 	duration := time.Now().Sub(start).Milliseconds()
-	e.assetSize.Record(ctx, float64(len(res)))
-	e.assetDuration.Record(ctx, float64(duration))
+	metrics.RecordAssetSize(float64(len(res)))
+	metrics.RecordProcessAssetDuration(float64(duration))
 	// Save asset to cache if we spent more than threshold
 	if duration > e.cfg.CacheThreshold {
 		e.mutex.Lock()
@@ -277,7 +245,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 			ts:  time.Now(),
 		}
 		e.mutex.Unlock()
-		e.cachedAssets.Add(ctx, 1)
+		metrics.IncreaseCachedAssets()
 	}
 	// Return rewritten asset
 	return res

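The CSS path touches five metric points: one total per call, skipped on a fresh cache hit, size and duration on a rewrite, and cached when the result is memoized (paired with DecreaseCachedAssets in clearCache). A condensed sketch of that flow under the pkg/metrics/sink API shown above; the lookup/rewrite/store callbacks stand in for the real cache and rewriter plumbing:

package assetscache

import (
	"time"

	metrics "openreplay/backend/pkg/metrics/sink"
)

// cssMetricPoints condenses where handleCSS records metrics.
func cssMetricPoints(css string, cacheThreshold int64,
	lookup func() (string, bool), rewrite func(string) string, store func(string)) string {
	metrics.IncreaseTotalAssets()
	if cached, ok := lookup(); ok {
		metrics.IncreaseSkippedAssets() // fresh cache hit, no rewrite work
		return cached
	}
	start := time.Now()
	res := rewrite(css) // stand-in for getRewrittenCSS
	duration := time.Since(start).Milliseconds()
	metrics.RecordAssetSize(float64(len(res)))
	metrics.RecordProcessAssetDuration(float64(duration))
	if duration > cacheThreshold {
		store(res)
		metrics.IncreaseCachedAssets() // paired with DecreaseCachedAssets in clearCache
	}
	return res
}
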
@ -2,20 +2,20 @@ package storage
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
metrics "openreplay/backend/pkg/metrics/storage"
|
||||
"openreplay/backend/pkg/storage"
|
||||
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
)
|
||||
|
||||
type FileType string
|
||||
|
|
@ -25,6 +25,13 @@ const (
|
|||
DEV FileType = "/devtools.mob"
|
||||
)
|
||||
|
||||
func (t FileType) String() string {
|
||||
if t == DOM {
|
||||
return "dom"
|
||||
}
|
||||
return "devtools"
|
||||
}
|
||||
|
||||
type Task struct {
|
||||
id string
|
||||
doms *bytes.Buffer
|
||||
|
|
@ -36,92 +43,23 @@ type Storage struct {
|
|||
cfg *config.Config
|
||||
s3 *storage.S3
|
||||
startBytes []byte
|
||||
|
||||
totalSessions syncfloat64.Counter
|
||||
sessionDOMSize syncfloat64.Histogram
|
||||
sessionDEVSize syncfloat64.Histogram
|
||||
readingDOMTime syncfloat64.Histogram
|
||||
readingDEVTime syncfloat64.Histogram
|
||||
sortingDOMTime syncfloat64.Histogram
|
||||
sortingDEVTime syncfloat64.Histogram
|
||||
archivingDOMTime syncfloat64.Histogram
|
||||
archivingDEVTime syncfloat64.Histogram
|
||||
uploadingDOMTime syncfloat64.Histogram
|
||||
uploadingDEVTime syncfloat64.Histogram
|
||||
|
||||
tasks chan *Task
|
||||
ready chan struct{}
|
||||
tasks chan *Task
|
||||
ready chan struct{}
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
|
||||
func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
case s3 == nil:
|
||||
return nil, fmt.Errorf("s3 storage is empty")
|
||||
}
|
||||
// Create metrics
|
||||
totalSessions, err := metrics.RegisterCounter("sessions_total")
|
||||
if err != nil {
|
||||
log.Printf("can't create sessions_total metric: %s", err)
|
||||
}
|
||||
sessionDOMSize, err := metrics.RegisterHistogram("sessions_size")
|
||||
if err != nil {
|
||||
log.Printf("can't create session_size metric: %s", err)
|
||||
}
|
||||
sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size")
|
||||
if err != nil {
|
||||
log.Printf("can't create sessions_dt_size metric: %s", err)
|
||||
}
|
||||
readingDOMTime, err := metrics.RegisterHistogram("reading_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
sortingDOMTime, err := metrics.RegisterHistogram("sorting_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
sortingDEVTime, err := metrics.RegisterHistogram("sorting_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
}
|
||||
archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create archiving_duration metric: %s", err)
|
||||
}
|
||||
archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create archiving_duration metric: %s", err)
|
||||
}
|
||||
uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create uploading_duration metric: %s", err)
|
||||
}
|
||||
uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create uploading_duration metric: %s", err)
|
||||
}
|
||||
newStorage := &Storage{
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
totalSessions: totalSessions,
|
||||
sessionDOMSize: sessionDOMSize,
|
||||
sessionDEVSize: sessionDevtoolsSize,
|
||||
		readingDOMTime:   readingDOMTime,
		readingDEVTime:   readingDEVTime,
		sortingDOMTime:   sortingDOMTime,
		sortingDEVTime:   sortingDEVTime,
		archivingDOMTime: archivingDOMTime,
		archivingDEVTime: archivingDEVTime,
		uploadingDOMTime: uploadingDOMTime,
		uploadingDEVTime: uploadingDEVTime,
		tasks:            make(chan *Task, 1),
		ready:            make(chan struct{}),
		cfg:        cfg,
		s3:         s3,
		startBytes: make([]byte, cfg.FileSplitSize),
		tasks:      make(chan *Task, 1),
		ready:      make(chan struct{}),
	}
	go newStorage.worker()
	return newStorage, nil

@@ -187,11 +125,7 @@ func (s *Storage) openSession(filePath string, tp FileType) ([]byte, error) {
	if err != nil {
		return nil, fmt.Errorf("can't sort session, err: %s", err)
	}
	if tp == DOM {
		s.sortingDOMTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
	} else {
		s.sortingDEVTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
	}
	metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
	return res, nil
}

@@ -215,26 +149,19 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
	if err != nil {
		return err
	}
	durRead := time.Now().Sub(startRead).Milliseconds()
	// Send metrics
	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
	if tp == DOM {
		s.sessionDOMSize.Record(ctx, float64(len(mob)))
		s.readingDOMTime.Record(ctx, float64(durRead))
	} else {
		s.sessionDEVSize.Record(ctx, float64(len(mob)))
		s.readingDEVTime.Record(ctx, float64(durRead))
	}
	metrics.RecordSessionSize(float64(len(mob)), tp.String())
	metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())

	// Encode and compress session
	if tp == DEV {
		startCompress := time.Now()
		start := time.Now()
		task.dev = s.compressSession(mob)
		s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
		metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
	} else {
		if len(mob) <= s.cfg.FileSplitSize {
			startCompress := time.Now()
			start := time.Now()
			task.doms = s.compressSession(mob)
			s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
			metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
			return nil
		}
		wg := &sync.WaitGroup{}

@@ -253,7 +180,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
			wg.Done()
		}()
		wg.Wait()
		s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart))
		metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
	}
	return nil
}

@@ -324,11 +251,9 @@ func (s *Storage) uploadSession(task *Task) {
		wg.Done()
	}()
	wg.Wait()
	// Record metrics
	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
	s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome))
	s.uploadingDEVTime.Record(ctx, float64(uploadDev))
	s.totalSessions.Add(ctx, 1)
	metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
	metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
	metrics.IncreaseStorageTotalSessions()
}

func (s *Storage) worker() {
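The storage changes above all follow one pattern: per-instance OpenTelemetry instruments (s.sortingDOMTime.Record(...)) are swapped for package-level Prometheus helpers (metrics.RecordSessionSortDuration(...)). A minimal hedged sketch of the new call shape, where sortSession, mob, and tp are assumed stand-ins for the surrounding code:

	// Hypothetical sketch of the pattern: time an operation, then report
	// milliseconds to a package-level Prometheus helper instead of an
	// instance-held OpenTelemetry instrument.
	start := time.Now()
	res, err := sortSession(mob) // sortSession/mob are assumed stand-ins
	if err != nil {
		return nil, fmt.Errorf("can't sort session, err: %s", err)
	}
	// Duration is passed in milliseconds; the helper converts to seconds.
	metrics.RecordSessionSortDuration(float64(time.Since(start).Milliseconds()), tp.String())
	return res, nil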
@@ -1,14 +1,13 @@
package postgres

import (
	"context"
	"github.com/jackc/pgx/v4"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
	"log"
	"openreplay/backend/pkg/monitoring"
	"strings"
	"time"

	"openreplay/backend/pkg/metrics/database"

	"github.com/jackc/pgx/v4"
)

type batchItem struct {

@@ -78,21 +77,17 @@ func NewBatchesTask(size int) *batchesTask {
}

type BatchSet struct {
	c                 Pool
	batches           map[uint64]*SessionBatch
	batchQueueLimit   int
	batchSizeLimit    int
	batchSizeBytes    syncfloat64.Histogram
	batchSizeLines    syncfloat64.Histogram
	sqlRequestTime    syncfloat64.Histogram
	sqlRequestCounter syncfloat64.Counter
	updates           map[uint64]*sessionUpdates
	workerTask        chan *batchesTask
	done              chan struct{}
	finished          chan struct{}
	c               Pool
	batches         map[uint64]*SessionBatch
	batchQueueLimit int
	batchSizeLimit  int
	updates         map[uint64]*sessionUpdates
	workerTask      chan *batchesTask
	done            chan struct{}
	finished        chan struct{}
}

func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *BatchSet {
func NewBatchSet(c Pool, queueLimit, sizeLimit int) *BatchSet {
	bs := &BatchSet{
		c:       c,
		batches: make(map[uint64]*SessionBatch),

@@ -103,31 +98,10 @@ func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics)
		finished: make(chan struct{}),
		updates:  make(map[uint64]*sessionUpdates),
	}
	bs.initMetrics(metrics)
	go bs.worker()
	return bs
}

func (conn *BatchSet) initMetrics(metrics *monitoring.Metrics) {
	var err error
	conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes")
	if err != nil {
		log.Printf("can't create batchSizeBytes metric: %s", err)
	}
	conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines")
	if err != nil {
		log.Printf("can't create batchSizeLines metric: %s", err)
	}
	conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time")
	if err != nil {
		log.Printf("can't create sqlRequestTime metric: %s", err)
	}
	conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number")
	if err != nil {
		log.Printf("can't create sqlRequestNumber metric: %s", err)
	}
}

func (conn *BatchSet) getBatch(sessionID uint64) *SessionBatch {
	sessionID = sessionID % 10
	if _, ok := conn.batches[sessionID]; !ok {

@@ -194,11 +168,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
		// Append session update sql request to the end of batch
		batch.Prepare()
		// Record batch size in bytes and number of lines
		conn.batchSizeBytes.Record(context.Background(), float64(batch.Size()))
		conn.batchSizeLines.Record(context.Background(), float64(batch.Len()))
		database.RecordBatchSize(float64(batch.Size()))
		database.RecordBatchElements(float64(batch.Len()))

		start := time.Now()
		isFailed := false

		// Send batch to db and execute
		br := conn.c.SendBatch(batch.batch)

@@ -209,15 +182,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
				failedSql := batch.items[i]
				query := strings.ReplaceAll(failedSql.query, "\n", " ")
				log.Println("failed sql req:", query, failedSql.arguments)
				isFailed = true
			}
		}
		br.Close() // returns err
		dur := time.Now().Sub(start).Milliseconds()
		conn.sqlRequestTime.Record(context.Background(), float64(dur),
			attribute.String("method", "batch"), attribute.Bool("failed", isFailed))
		conn.sqlRequestCounter.Add(context.Background(), 1,
			attribute.String("method", "batch"), attribute.Bool("failed", isFailed))
		database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
	}
}
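After this change BatchSet no longer carries a *monitoring.Metrics handle; the collectors live in openreplay/backend/pkg/metrics/database. A hedged usage sketch of the new constructor (the pool and limits are assumed placeholders):

	// Assumed usage after the signature change: the metrics argument is gone.
	bs := NewBatchSet(pool, 20, 10_000) // pool is an assumed, already-built Pool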
@@ -2,13 +2,9 @@ package postgres

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
	"log"
	"openreplay/backend/pkg/monitoring"
	"openreplay/backend/pkg/metrics/database"
	"time"
)

@@ -25,15 +21,13 @@ type Bulk interface {
}

type bulkImpl struct {
	conn         Pool
	table        string
	columns      string
	template     string
	setSize      int
	sizeLimit    int
	values       []interface{}
	bulkSize     syncfloat64.Histogram
	bulkDuration syncfloat64.Histogram
	conn      Pool
	table     string
	columns   string
	template  string
	setSize   int
	sizeLimit int
	values    []interface{}
}

func (b *bulkImpl) Append(args ...interface{}) error {

@@ -79,18 +73,15 @@ func (b *bulkImpl) send() error {
		return fmt.Errorf("send bulk err: %s", err)
	}
	// Save bulk metrics
	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
	b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table))
	b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table))
	database.RecordBulkElements(float64(size), "pg", b.table)
	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
	return nil
}

func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
	switch {
	case conn == nil:
		return nil, errors.New("db conn is empty")
	case metrics == nil:
		return nil, errors.New("metrics is empty")
	case table == "":
		return nil, errors.New("table is empty")
	case columns == "":

@@ -102,23 +93,13 @@ func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template st
	case sizeLimit <= 0:
		return nil, errors.New("size limit is wrong")
	}
	messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk")
	if err != nil {
		log.Printf("can't create messages_size metric: %s", err)
	}
	bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration")
	if err != nil {
		log.Printf("can't create messages_size metric: %s", err)
	}
	return &bulkImpl{
		conn:         conn,
		table:        table,
		columns:      columns,
		template:     template,
		setSize:      setSize,
		sizeLimit:    sizeLimit,
		values:       make([]interface{}, 0, setSize*sizeLimit),
		bulkSize:     messagesInBulk,
		bulkDuration: bulkInsertDuration,
		conn:      conn,
		table:     table,
		columns:   columns,
		template:  template,
		setSize:   setSize,
		sizeLimit: sizeLimit,
		values:    make([]interface{}, 0, setSize*sizeLimit),
	}, nil
}
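NewBulk likewise drops its metrics parameter. A hedged usage sketch matching the initBulks calls below; the pool variable and the meaning of setSize/sizeLimit (values per row, rows per flush) are assumptions, not confirmed by the diff:

	// Assumed usage: one bulk per target table; the "($%d, $%d, $%d)"
	// template is expanded with positional placeholders per appended row.
	b, err := NewBulk(pool, // pool is an assumed Pool
		"autocomplete",
		"(value, type, project_id)",
		"($%d, $%d, $%d)",
		3, 200)
	if err != nil {
		log.Fatalf("can't create bulk: %s", err)
	}
	_ = b.Append("signup", "CLICK", 1) // queued; written on the next send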
@@ -2,7 +2,6 @@ package postgres

import (
	"log"
	"openreplay/backend/pkg/monitoring"
	"time"
)

@@ -30,16 +29,14 @@ type BulkSet struct {
	webCustomEvents   Bulk
	webClickEvents    Bulk
	webNetworkRequest Bulk
	metrics           *monitoring.Metrics
	workerTask        chan *bulksTask
	done              chan struct{}
	finished          chan struct{}
}

func NewBulkSet(c Pool, metrics *monitoring.Metrics) *BulkSet {
func NewBulkSet(c Pool) *BulkSet {
	bs := &BulkSet{
		c:          c,
		metrics:    metrics,
		workerTask: make(chan *bulksTask, 1),
		done:       make(chan struct{}),
		finished:   make(chan struct{}),

@@ -86,7 +83,7 @@ func (conn *BulkSet) Get(name string) Bulk {

func (conn *BulkSet) initBulks() {
	var err error
	conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
	conn.autocompletes, err = NewBulk(conn.c,
		"autocomplete",
		"(value, type, project_id)",
		"($%d, $%d, $%d)",

@@ -94,7 +91,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create autocomplete bulk: %s", err)
	}
	conn.requests, err = NewBulk(conn.c, conn.metrics,
	conn.requests, err = NewBulk(conn.c,
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",

@@ -102,7 +99,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create requests bulk: %s", err)
	}
	conn.customEvents, err = NewBulk(conn.c, conn.metrics,
	conn.customEvents, err = NewBulk(conn.c,
		"events_common.customs",
		"(session_id, timestamp, seq_index, name, payload)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",

@@ -110,7 +107,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create customEvents bulk: %s", err)
	}
	conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webPageEvents, err = NewBulk(conn.c,
		"events.pages",
		"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
			"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+

@@ -122,7 +119,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webPageEvents bulk: %s", err)
	}
	conn.webInputEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webInputEvents, err = NewBulk(conn.c,
		"events.inputs",
		"(session_id, message_id, timestamp, value, label)",
		"($%d, $%d, $%d, LEFT($%d, 2000), NULLIF(LEFT($%d, 2000),''))",

@@ -130,7 +127,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webPageEvents bulk: %s", err)
	}
	conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
	conn.webGraphQL, err = NewBulk(conn.c,
		"events.graphql",
		"(session_id, timestamp, message_id, name, request_body, response_body)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",

@@ -138,7 +135,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webPageEvents bulk: %s", err)
	}
	conn.webErrors, err = NewBulk(conn.c, conn.metrics,
	conn.webErrors, err = NewBulk(conn.c,
		"errors",
		"(error_id, project_id, source, name, message, payload)",
		"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",

@@ -146,7 +143,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webErrors bulk: %s", err)
	}
	conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webErrorEvents, err = NewBulk(conn.c,
		"events.errors",
		"(session_id, message_id, timestamp, error_id)",
		"($%d, $%d, $%d, $%d)",

@@ -154,7 +151,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webErrorEvents bulk: %s", err)
	}
	conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
	conn.webErrorTags, err = NewBulk(conn.c,
		"public.errors_tags",
		"(session_id, message_id, error_id, key, value)",
		"($%d, $%d, $%d, $%d, $%d)",

@@ -162,7 +159,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webErrorEvents bulk: %s", err)
	}
	conn.webIssues, err = NewBulk(conn.c, conn.metrics,
	conn.webIssues, err = NewBulk(conn.c,
		"issues",
		"(project_id, issue_id, type, context_string)",
		"($%d, $%d, $%d, $%d)",

@@ -170,7 +167,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webIssues bulk: %s", err)
	}
	conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webIssueEvents, err = NewBulk(conn.c,
		"events_common.issues",
		"(session_id, issue_id, timestamp, seq_index, payload)",
		"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",

@@ -178,7 +175,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webIssueEvents bulk: %s", err)
	}
	conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webCustomEvents, err = NewBulk(conn.c,
		"events_common.customs",
		"(session_id, seq_index, timestamp, name, payload, level)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",

@@ -186,7 +183,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webCustomEvents bulk: %s", err)
	}
	conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
	conn.webClickEvents, err = NewBulk(conn.c,
		"events.clicks",
		"(session_id, message_id, timestamp, label, selector, url, path)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000))",

@@ -194,7 +191,7 @@ func (conn *BulkSet) initBulks() {
	if err != nil {
		log.Fatalf("can't create webClickEvents bulk: %s", err)
	}
	conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
	conn.webNetworkRequest, err = NewBulk(conn.c,
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)",
@@ -2,11 +2,10 @@ package postgres

import (
	"context"
	"github.com/jackc/pgx/v4/pgxpool"
	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
	"log"

	"github.com/jackc/pgx/v4/pgxpool"
	"openreplay/backend/pkg/db/types"
	"openreplay/backend/pkg/monitoring"
)

type CH interface {

@@ -15,36 +14,28 @@ type CH interface {

// Conn contains batches, bulks and cache for all sessions
type Conn struct {
	c                 Pool
	batches           *BatchSet
	bulks             *BulkSet
	batchSizeBytes    syncfloat64.Histogram
	batchSizeLines    syncfloat64.Histogram
	sqlRequestTime    syncfloat64.Histogram
	sqlRequestCounter syncfloat64.Counter
	chConn            CH
	c       Pool
	batches *BatchSet
	bulks   *BulkSet
	chConn  CH
}

func (conn *Conn) SetClickHouse(ch CH) {
	conn.chConn = ch
}

func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn {
	if metrics == nil {
		log.Fatalf("metrics is nil")
	}
func NewConn(url string, queueLimit, sizeLimit int) *Conn {
	c, err := pgxpool.Connect(context.Background(), url)
	if err != nil {
		log.Fatalf("pgxpool.Connect err: %s", err)
	}
	conn := &Conn{}
	conn.initMetrics(metrics)
	conn.c, err = NewPool(c, conn.sqlRequestTime, conn.sqlRequestCounter)
	conn.c, err = NewPool(c)
	if err != nil {
		log.Fatalf("can't create new pool wrapper: %s", err)
	}
	conn.bulks = NewBulkSet(conn.c, metrics)
	conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit, metrics)
	conn.bulks = NewBulkSet(conn.c)
	conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit)
	return conn
}

@@ -55,26 +46,6 @@ func (conn *Conn) Close() error {
	return nil
}

func (conn *Conn) initMetrics(metrics *monitoring.Metrics) {
	var err error
	conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes")
	if err != nil {
		log.Printf("can't create batchSizeBytes metric: %s", err)
	}
	conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines")
	if err != nil {
		log.Printf("can't create batchSizeLines metric: %s", err)
	}
	conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time")
	if err != nil {
		log.Printf("can't create sqlRequestTime metric: %s", err)
	}
	conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number")
	if err != nil {
		log.Printf("can't create sqlRequestNumber metric: %s", err)
	}
}

func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) {
	if len(value) == 0 {
		return
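With initMetrics gone, the whole PG layer is wired up in a single call. A hedged construction sketch (URL and limits are placeholders):

	// Assumed wiring after the change: no *monitoring.Metrics threaded through.
	conn := postgres.NewConn("postgres://user:pass@localhost:5432/db", 20, 10_000)
	defer conn.Close()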
@@ -3,12 +3,12 @@ package postgres

import (
	"context"
	"errors"
	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/pgxpool"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
	"strings"
	"time"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/pgxpool"
	"openreplay/backend/pkg/metrics/database"
)

// Pool is a pgx.Pool wrapper with metrics integration

@@ -22,19 +22,15 @@ type Pool interface {
}

type poolImpl struct {
	conn              *pgxpool.Pool
	sqlRequestTime    syncfloat64.Histogram
	sqlRequestCounter syncfloat64.Counter
	conn *pgxpool.Pool
}

func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
	start := time.Now()
	res, err := p.conn.Query(getTimeoutContext(), sql, args...)
	method, table := methodName(sql)
	p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", method), attribute.String("table", table))
	p.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", method), attribute.String("table", table))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
	database.IncreaseTotalRequests(method, table)
	return res, err
}

@@ -42,10 +38,8 @@ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
	start := time.Now()
	res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
	method, table := methodName(sql)
	p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", method), attribute.String("table", table))
	p.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", method), attribute.String("table", table))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
	database.IncreaseTotalRequests(method, table)
	return res
}

@@ -53,45 +47,37 @@ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
	start := time.Now()
	_, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
	method, table := methodName(sql)
	p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", method), attribute.String("table", table))
	p.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", method), attribute.String("table", table))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
	database.IncreaseTotalRequests(method, table)
	return err
}

func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
	start := time.Now()
	res := p.conn.SendBatch(getTimeoutContext(), b)
	p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", "sendBatch"))
	p.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", "sendBatch"))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
	database.IncreaseTotalRequests("sendBatch", "")
	return res
}

func (p *poolImpl) Begin() (*_Tx, error) {
	start := time.Now()
	tx, err := p.conn.Begin(context.Background())
	p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", "begin"))
	p.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", "begin"))
	return &_Tx{tx, p.sqlRequestTime, p.sqlRequestCounter}, err
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
	database.IncreaseTotalRequests("begin", "")
	return &_Tx{tx}, err
}

func (p *poolImpl) Close() {
	p.conn.Close()
}

func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlRequestCounter syncfloat64.Counter) (Pool, error) {
func NewPool(conn *pgxpool.Pool) (Pool, error) {
	if conn == nil {
		return nil, errors.New("conn is empty")
	}
	return &poolImpl{
		conn:              conn,
		sqlRequestTime:    sqlRequestTime,
		sqlRequestCounter: sqlRequestCounter,
		conn: conn,
	}, nil
}

@@ -99,38 +85,30 @@ func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlReques

type _Tx struct {
	pgx.Tx
	sqlRequestTime    syncfloat64.Histogram
	sqlRequestCounter syncfloat64.Counter
}

func (tx *_Tx) exec(sql string, args ...interface{}) error {
	start := time.Now()
	_, err := tx.Exec(context.Background(), sql, args...)
	method, table := methodName(sql)
	tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", method), attribute.String("table", table))
	tx.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", method), attribute.String("table", table))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
	database.IncreaseTotalRequests(method, table)
	return err
}

func (tx *_Tx) rollback() error {
	start := time.Now()
	err := tx.Rollback(context.Background())
	tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", "rollback"))
	tx.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", "rollback"))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
	database.IncreaseTotalRequests("rollback", "")
	return err
}

func (tx *_Tx) commit() error {
	start := time.Now()
	err := tx.Commit(context.Background())
	tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
		attribute.String("method", "commit"))
	tx.sqlRequestCounter.Add(context.Background(), 1,
		attribute.String("method", "commit"))
	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
	database.IncreaseTotalRequests("commit", "")
	return err
}

@@ -169,7 +147,8 @@ func methodName(sql string) (string, string) {
	case "update":
		table = strings.TrimSpace(parts[1])
	case "insert":
		table = strings.TrimSpace(parts[2])
		tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(")
		table = tableNameParts[0]
	}
	return cmd, table
}
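The methodName tweak in the "insert" branch matters when an INSERT has no space before its column list: the third whitespace token is then something like "events.pages(session_id," and the old code recorded that whole token as the table label. Splitting on "(" keeps only the table name. A self-contained sketch of the assumed parsing (it presumes parts comes from splitting the statement on whitespace, as the surrounding code suggests):

	package main

	import (
		"fmt"
		"strings"
	)

	// extractInsertTable mirrors the fixed "insert" branch of methodName:
	// take the third token and cut everything from the first "(".
	func extractInsertTable(sql string) string {
		parts := strings.Fields(sql)
		return strings.Split(strings.TrimSpace(parts[2]), "(")[0]
	}

	func main() {
		sql := "INSERT INTO events.pages(session_id, message_id) VALUES ($1, $2)"
		fmt.Println(extractInsertTable(sql)) // prints: events.pages
	}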
@@ -3,6 +3,7 @@ package messages

import (
	"fmt"
	"log"
	"openreplay/backend/pkg/metrics/sink"
)

type sinkMessageIteratorImpl struct {

@@ -53,6 +54,8 @@ func (i *sinkMessageIteratorImpl) sendBatchEnd() {
}

func (i *sinkMessageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
	sink.RecordBatchSize(float64(len(batchData)))
	sink.IncreaseTotalBatches()
	// Create new message reader
	reader := NewMessageReader(batchData)

@@ -74,12 +74,13 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
	i.messageInfo.Index++

	msg := reader.Message()
	msgType := msg.TypeID()

	// Preprocess "system" messages
	if _, ok := i.preFilter[msg.TypeID()]; ok {
		msg = msg.Decode()
		if msg == nil {
			log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
			log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
			return
		}
		msg = transformDeprecated(msg)

@@ -99,7 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
	if i.autoDecode {
		msg = msg.Decode()
		if msg == nil {
			log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
			log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
			return
		}
	}
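Caching msgType before Decode is more than cosmetic: msg is reassigned to Decode's result, and when that result is nil the old log line called msg.TypeID() on the nil value, risking a nil dereference (or at best logging a wrong type). A hypothetical reduction of the failure mode, with reader and Message assumed from the surrounding package:

	// Hypothetical sketch: capture the type while msg is still valid.
	msg := reader.Message()
	msgType := msg.TypeID()
	msg = msg.Decode()
	if msg == nil {
		// Old code logged msg.TypeID() here, i.e. a method call on nil.
		log.Printf("decode error, type: %d", msgType) // safe: cached value
		return
	}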
@@ -40,13 +40,6 @@ func SplitMessages(data []byte) ([]*msgInfo, error) {
			return nil, fmt.Errorf("read message type err: %s", err)
		}

		if msgType == MsgRedux {
			log.Printf("redux")
		}
		if msgType == MsgFetch {
			log.Printf("fetch")
		}

		// Read message body
		_, err = ReadMessage(msgType, reader)
		if err != nil {

backend/pkg/metrics/assets/metrics.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package assets

import (
	"github.com/prometheus/client_golang/prometheus"
	"openreplay/backend/pkg/metrics/common"
	"strconv"
)

var assetsProcessedSessions = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "assets",
		Name:      "processed_total",
		Help:      "A counter displaying the total count of processed assets.",
	},
)

func IncreaseProcessesSessions() {
	assetsProcessedSessions.Inc()
}

var assetsSavedSessions = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "assets",
		Name:      "saved_total",
		Help:      "A counter displaying the total number of cached assets.",
	},
)

func IncreaseSavedSessions() {
	assetsSavedSessions.Inc()
}

var assetsDownloadDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "assets",
		Name:      "download_duration_seconds",
		Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"response_code"},
)

func RecordDownloadDuration(durMillis float64, code int) {
	assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

var assetsUploadDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "assets",
		Name:      "upload_s3_duration_seconds",
		Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"failed"},
)

func RecordUploadDuration(durMillis float64, isFailed bool) {
	failed := "false"
	if isFailed {
		failed = "true"
	}
	assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		assetsProcessedSessions,
		assetsSavedSessions,
		assetsDownloadDuration,
		assetsUploadDuration,
	}
}

backend/pkg/metrics/common/metrics.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package common

// DefaultDurationBuckets is a set of buckets from 5 milliseconds to 1000 seconds (~16.7 minutes)
var DefaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000}

// DefaultSizeBuckets is a set of buckets from 1 byte to 1_000_000_000 bytes (~1 GB)
var DefaultSizeBuckets = []float64{1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100_000, 250_000,
	500_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000}

// DefaultBuckets is a set of buckets from 1 to 1_000_000 elements
var DefaultBuckets = []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10_000, 50_000, 100_000, 1_000_000}

backend/pkg/metrics/database/metrics.go (new file, 127 lines)
@@ -0,0 +1,127 @@
package database

import (
	"github.com/prometheus/client_golang/prometheus"
	"openreplay/backend/pkg/metrics/common"
)

var dbBatchSize = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "batch_size_bytes",
		Help:      "A histogram displaying the batch size in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
)

func RecordBatchSize(size float64) {
	dbBatchSize.Observe(size)
}

var dbBatchElements = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "batch_size_elements",
		Help:      "A histogram displaying the number of SQL commands in each batch.",
		Buckets:   common.DefaultBuckets,
	},
)

func RecordBatchElements(number float64) {
	dbBatchElements.Observe(number)
}

var dbBatchInsertDuration = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "batch_insert_duration_seconds",
		Help:      "A histogram displaying the duration of batch inserts in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
)

func RecordBatchInsertDuration(durMillis float64) {
	dbBatchInsertDuration.Observe(durMillis / 1000.0)
}

var dbBulkSize = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "bulk_size_bytes",
		Help:      "A histogram displaying the bulk size in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
	[]string{"db", "table"},
)

func RecordBulkSize(size float64, db, table string) {
	dbBulkSize.WithLabelValues(db, table).Observe(size)
}

var dbBulkElements = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "bulk_size_elements",
		Help:      "A histogram displaying the size of data set in each bulk.",
		Buckets:   common.DefaultBuckets,
	},
	[]string{"db", "table"},
)

func RecordBulkElements(size float64, db, table string) {
	dbBulkElements.WithLabelValues(db, table).Observe(size)
}

var dbBulkInsertDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "bulk_insert_duration_seconds",
		Help:      "A histogram displaying the duration of bulk inserts in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"db", "table"},
)

func RecordBulkInsertDuration(durMillis float64, db, table string) {
	dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
}

var dbRequestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "db",
		Name:      "request_duration_seconds",
		Help:      "A histogram displaying the duration of each sql request in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"method", "table"},
)

func RecordRequestDuration(durMillis float64, method, table string) {
	dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
}

var dbTotalRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "db",
		Name:      "requests_total",
		Help:      "A counter showing the total number of all SQL requests.",
	},
	[]string{"method", "table"},
)

func IncreaseTotalRequests(method, table string) {
	dbTotalRequests.WithLabelValues(method, table).Inc()
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		dbBatchSize,
		dbBatchElements,
		dbBatchInsertDuration,
		dbBulkSize,
		dbBulkElements,
		dbBulkInsertDuration,
		dbRequestDuration,
		dbTotalRequests,
	}
}
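All of these helpers take durations in milliseconds and divide by 1000 before observing, so call sites keep working with Milliseconds(). A hedged call-site sketch (pool is an assumed Pool wrapper from the diff above):

	// Assumed instrumentation pattern: measure in milliseconds, record, count.
	start := time.Now()
	rows, err := pool.Query("SELECT 1")
	database.RecordRequestDuration(float64(time.Since(start).Milliseconds()), "select", "")
	database.IncreaseTotalRequests("select", "")
	_, _ = rows, err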
backend/pkg/metrics/ender/metrics.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package ender

import "github.com/prometheus/client_golang/prometheus"

var enderActiveSessions = prometheus.NewGauge(
	prometheus.GaugeOpts{
		Namespace: "ender",
		Name:      "sessions_active",
		Help:      "A gauge displaying the number of active (live) sessions.",
	},
)

func IncreaseActiveSessions() {
	enderActiveSessions.Inc()
}

func DecreaseActiveSessions() {
	enderActiveSessions.Dec()
}

var enderClosedSessions = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "ender",
		Name:      "sessions_closed",
		Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
	},
)

func IncreaseClosedSessions() {
	enderClosedSessions.Inc()
}

var enderTotalSessions = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "ender",
		Name:      "sessions_total",
		Help:      "A counter displaying the number of all processed sessions.",
	},
)

func IncreaseTotalSessions() {
	enderTotalSessions.Inc()
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		enderActiveSessions,
		enderClosedSessions,
		enderTotalSessions,
	}
}

backend/pkg/metrics/http/metrics.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package http

import (
	"github.com/prometheus/client_golang/prometheus"
	"openreplay/backend/pkg/metrics/common"
	"strconv"
)

var httpRequestSize = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "http",
		Name:      "request_size_bytes",
		Help:      "A histogram displaying the size of each HTTP request in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
	[]string{"url", "response_code"},
)

func RecordRequestSize(size float64, url string, code int) {
	httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
}

var httpRequestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "http",
		Name:      "request_duration_seconds",
		Help:      "A histogram displaying the duration of each HTTP request in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"url", "response_code"},
)

func RecordRequestDuration(durMillis float64, url string, code int) {
	httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

var httpTotalRequests = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "http",
		Name:      "requests_total",
		Help:      "A counter displaying the number of all HTTP requests.",
	},
)

func IncreaseTotalRequests() {
	httpTotalRequests.Inc()
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		httpRequestSize,
		httpRequestDuration,
		httpTotalRequests,
	}
}
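These helpers are handler-agnostic; the diff does not show where they are called, so the following is only a hedged middleware sketch of one way to attach them to a net/http handler. The fixed 200 status is an assumption; a real version would wrap the ResponseWriter to capture the written code.

	package main

	import (
		"net/http"
		"time"

		metrics "openreplay/backend/pkg/metrics/http" // aliased: clashes with net/http
	)

	// instrument is a hypothetical middleware recording size, duration and count.
	func instrument(next http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			next(w, r)
			code := 200 // assumed: wrap w to capture the real status code
			metrics.RecordRequestSize(float64(r.ContentLength), r.URL.Path, code)
			metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), r.URL.Path, code)
			metrics.IncreaseTotalRequests()
		}
	}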
backend/pkg/metrics/server.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"log"
	"net/http"
)

type MetricServer struct {
	registry *prometheus.Registry
}

func New() *MetricServer {
	registry := prometheus.NewRegistry()
	// Add go runtime metrics and process collectors.
	registry.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	// Expose /metrics HTTP endpoint using the created custom registry.
	http.Handle(
		"/metrics", promhttp.HandlerFor(
			registry,
			promhttp.HandlerOpts{
				EnableOpenMetrics: true,
			}),
	)
	go func() {
		log.Println(http.ListenAndServe(":8888", nil))
	}()
	return &MetricServer{
		registry: registry,
	}
}

func (s *MetricServer) Register(cs []prometheus.Collector) {
	s.registry.MustRegister(cs...)
}
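Each metrics package exposes a List() of its collectors, so a service registers only the families it uses. A hedged startup sketch for a service touching the database and sink packages:

	// Assumed service startup: one registry, per-package collector lists.
	m := metrics.New() // serves /metrics on :8888 via the custom registry
	m.Register(database.List())
	m.Register(sink.List())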
backend/pkg/metrics/sink/metrics.go (new file, 185 lines)
@@ -0,0 +1,185 @@
package sink

import (
	"github.com/prometheus/client_golang/prometheus"
	"openreplay/backend/pkg/metrics/common"
)

var sinkMessageSize = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "sink",
		Name:      "message_size_bytes",
		Help:      "A histogram displaying the size of each message in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
)

func RecordMessageSize(size float64) {
	sinkMessageSize.Observe(size)
}

var sinkWrittenMessages = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "messages_written",
		Help:      "A counter displaying the total number of all written messages.",
	},
)

func IncreaseWrittenMessages() {
	sinkWrittenMessages.Inc()
}

var sinkTotalMessages = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "messages_total",
		Help:      "A counter displaying the total number of all processed messages.",
	},
)

func IncreaseTotalMessages() {
	sinkTotalMessages.Inc()
}

var sinkBatchSize = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "sink",
		Name:      "batch_size_bytes",
		Help:      "A histogram displaying the size of each batch in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
)

func RecordBatchSize(size float64) {
	sinkBatchSize.Observe(size)
}

var sinkTotalBatches = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "batches_total",
		Help:      "A counter displaying the total number of all written batches.",
	},
)

func IncreaseTotalBatches() {
	sinkTotalBatches.Inc()
}

var sinkWrittenBytes = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "sink",
		Name:      "written_bytes",
		Help:      "A histogram displaying the size of buffer in bytes written to session file.",
		Buckets:   common.DefaultSizeBuckets,
	},
	[]string{"file_type"},
)

func RecordWrittenBytes(size float64, fileType string) {
	if size == 0 {
		return
	}
	sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
	IncreaseTotalWrittenBytes(size, fileType)
}

var sinkTotalWrittenBytes = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "written_bytes_total",
		Help:      "A counter displaying the total number of bytes written to all session files.",
	},
	[]string{"file_type"},
)

func IncreaseTotalWrittenBytes(size float64, fileType string) {
	if size == 0 {
		return
	}
	sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
}

var sinkCachedAssets = prometheus.NewGauge(
	prometheus.GaugeOpts{
		Namespace: "sink",
		Name:      "assets_cached",
		Help:      "A gauge displaying the current number of cached assets.",
	},
)

func IncreaseCachedAssets() {
	sinkCachedAssets.Inc()
}

func DecreaseCachedAssets() {
	sinkCachedAssets.Dec()
}

var sinkSkippedAssets = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "assets_skipped",
		Help:      "A counter displaying the total number of all skipped assets.",
	},
)

func IncreaseSkippedAssets() {
	sinkSkippedAssets.Inc()
}

var sinkTotalAssets = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "sink",
		Name:      "assets_total",
		Help:      "A counter displaying the total number of all processed assets.",
	},
)

func IncreaseTotalAssets() {
	sinkTotalAssets.Inc()
}

var sinkAssetSize = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "sink",
		Name:      "asset_size_bytes",
		Help:      "A histogram displaying the size of each asset in bytes.",
		Buckets:   common.DefaultSizeBuckets,
	},
)

func RecordAssetSize(size float64) {
	sinkAssetSize.Observe(size)
}

var sinkProcessAssetDuration = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Namespace: "sink",
		Name:      "asset_process_duration_seconds",
		Help:      "A histogram displaying the duration of processing for each asset in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
)

func RecordProcessAssetDuration(durMillis float64) {
	sinkProcessAssetDuration.Observe(durMillis / 1000.0)
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		sinkMessageSize,
		sinkWrittenMessages,
		sinkTotalMessages,
		sinkBatchSize,
		sinkTotalBatches,
		sinkWrittenBytes,
		sinkTotalWrittenBytes,
		sinkCachedAssets,
		sinkSkippedAssets,
		sinkTotalAssets,
		sinkAssetSize,
		sinkProcessAssetDuration,
	}
}
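RecordWrittenBytes feeds two time series at once: the per-write histogram and, via IncreaseTotalWrittenBytes, a running byte counter; zero-byte writes are dropped so they do not skew the distribution. A hedged call-site sketch (buf is an assumed in-memory session buffer):

	// Assumed call site after flushing a session buffer to disk.
	n := len(buf)
	sink.RecordWrittenBytes(float64(n), "dom") // observes histogram + adds to counter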
backend/pkg/metrics/storage/metrics.go (new file, 114 lines)
@@ -0,0 +1,114 @@
package storage

import (
	"github.com/prometheus/client_golang/prometheus"
	"openreplay/backend/pkg/metrics/common"
)

var storageSessionSize = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "session_size_bytes",
		Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
		Buckets:   common.DefaultSizeBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionSize(fileSize float64, fileType string) {
	storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
}

var storageTotalSessions = prometheus.NewCounter(
	prometheus.CounterOpts{
		Namespace: "storage",
		Name:      "sessions_total",
		Help:      "A counter displaying the total number of all processed sessions.",
	},
)

func IncreaseStorageTotalSessions() {
	storageTotalSessions.Inc()
}

var storageSessionReadDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "read_duration_seconds",
		Help:      "A histogram displaying the duration of reading for each session in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionReadDuration(durMillis float64, fileType string) {
	storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

var storageSessionSortDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "sort_duration_seconds",
		Help:      "A histogram displaying the duration of sorting for each session in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionSortDuration(durMillis float64, fileType string) {
	storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

var storageSessionEncodeDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "encode_duration_seconds",
		Help:      "A histogram displaying the duration of encoding for each session in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionEncodeDuration(durMillis float64, fileType string) {
	storageSessionEncodeDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

var storageSessionCompressDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "compress_duration_seconds",
		Help:      "A histogram displaying the duration of compressing for each session in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionCompressDuration(durMillis float64, fileType string) {
	storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

var storageSessionUploadDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "storage",
		Name:      "upload_duration_seconds",
		Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
		Buckets:   common.DefaultDurationBuckets,
	},
	[]string{"file_type"},
)

func RecordSessionUploadDuration(durMillis float64, fileType string) {
	storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func List() []prometheus.Collector {
	return []prometheus.Collector{
		storageSessionSize,
		storageTotalSessions,
		storageSessionReadDuration,
		storageSessionSortDuration,
		storageSessionEncodeDuration,
		storageSessionCompressDuration,
		storageSessionUploadDuration,
	}
}
@@ -1,138 +0,0 @@
package monitoring

import (
	"fmt"
	"log"
	"net/http"

	"go.opentelemetry.io/otel/exporters/prometheus"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

// Metrics stores all collected metrics
type Metrics struct {
	meter          metric.Meter
	counters       map[string]syncfloat64.Counter
	upDownCounters map[string]syncfloat64.UpDownCounter
	histograms     map[string]syncfloat64.Histogram
}

func New(name string) *Metrics {
	m := &Metrics{
		counters:       make(map[string]syncfloat64.Counter),
		upDownCounters: make(map[string]syncfloat64.UpDownCounter),
		histograms:     make(map[string]syncfloat64.Histogram),
	}
	m.initPrometheusDataExporter()
	m.initMetrics(name)
	return m
}

// initPrometheusDataExporter allows to use collected metrics in prometheus
func (m *Metrics) initPrometheusDataExporter() {
	config := prometheus.Config{
		DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000},
	}
	c := controller.New(
		processor.NewFactory(
			selector.NewWithHistogramDistribution(
				histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
			),
			aggregation.CumulativeTemporalitySelector(),
			processor.WithMemory(true),
		),
	)
	exporter, err := prometheus.New(config, c)
	if err != nil {
		log.Panicf("failed to initialize prometheus exporter %v", err)
	}

	global.SetMeterProvider(exporter.MeterProvider())

	http.HandleFunc("/metrics", exporter.ServeHTTP)
	go func() {
		_ = http.ListenAndServe(":8888", nil)
	}()

	fmt.Println("Prometheus server running on :8888")
}

func (m *Metrics) initMetrics(name string) {
	m.meter = global.Meter(name)
}

/*
Counter is a synchronous instrument that measures additive non-decreasing values, for example, the number of:
- processed requests
- received bytes
- disk reads
*/

func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) {
	if counter, ok := m.counters[name]; ok {
		return counter, nil
	}
	counter, err := m.meter.SyncFloat64().Counter(name)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize counter: %v", err)
	}
	m.counters[name] = counter
	return counter, nil
}

func (m *Metrics) GetCounter(name string) syncfloat64.Counter {
	return m.counters[name]
}

/*
UpDownCounter is a synchronous instrument which measures additive values that increase or decrease with time,
for example, the number of:
- active requests
- open connections
- memory in use (megabytes)
*/

func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) {
	if counter, ok := m.upDownCounters[name]; ok {
		return counter, nil
	}
	counter, err := m.meter.SyncFloat64().UpDownCounter(name)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize upDownCounter: %v", err)
	}
	m.upDownCounters[name] = counter
	return counter, nil
}

func (m *Metrics) GetUpDownCounter(name string) syncfloat64.UpDownCounter {
	return m.upDownCounters[name]
}

/*
Histogram is a synchronous instrument that produces a histogram from recorded values, for example:
- request latency
- request size
*/

func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) {
	if hist, ok := m.histograms[name]; ok {
		return hist, nil
	}
	hist, err := m.meter.SyncFloat64().Histogram(name)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize histogram: %v", err)
	}
	m.histograms[name] = hist
	return hist, nil
}

func (m *Metrics) GetHistogram(name string) syncfloat64.Histogram {
	return m.histograms[name]
}
ee/api/.gitignore (vendored, 1 line changed)
@@ -263,5 +263,6 @@ Pipfile.lock
/chalicelib/core/saved_search.py
/app_alerts.py
/build_alerts.sh
/build_crons.sh
/routers/subs/v1_api.py
#exp /chalicelib/core/dashboards.py
@@ -6,41 +6,41 @@ logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
from . import sessions as sessions_legacy

if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
    print(">>> Using experimental sessions search")
    logging.info(">>> Using experimental sessions search")
    from . import sessions_exp as sessions
else:
    from . import sessions as sessions

if config("EXP_AUTOCOMPLETE", cast=bool, default=False):
    print(">>> Using experimental autocomplete")
    logging.info(">>> Using experimental autocomplete")
    from . import autocomplete_exp as autocomplete
else:
    from . import autocomplete as autocomplete

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    print(">>> Using experimental error search")
    logging.info(">>> Using experimental error search")
    from . import errors as errors_legacy
    from . import errors_exp as errors

    if config("EXP_ERRORS_GET", cast=bool, default=False):
        print(">>> Using experimental error get")
        logging.info(">>> Using experimental error get")
else:
    from . import errors as errors

if config("EXP_METRICS", cast=bool, default=False):
    print(">>> Using experimental metrics")
    logging.info(">>> Using experimental metrics")
    from . import metrics_exp as metrics
else:
    from . import metrics as metrics

if config("EXP_ALERTS", cast=bool, default=False):
    print(">>> Using experimental alerts")
    logging.info(">>> Using experimental alerts")
    from . import alerts_processor_exp as alerts_processor
else:
    from . import alerts_processor as alerts_processor

if config("EXP_FUNNELS", cast=bool, default=False):
    print(">>> Using experimental funnels")
    logging.info(">>> Using experimental funnels")
    if not config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
        from . import sessions as sessions_legacy

@@ -49,4 +49,4 @@ else:
    from . import significance as significance

if config("EXP_RESOURCES", cast=bool, default=False):
    print(">>> Using experimental resources for session-replay")
    logging.info(">>> Using experimental resources for session-replay")
@ -54,10 +54,12 @@ LeftToDb = {
|
|||
schemas.AlertColumn.errors__4xx_5xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
|
||||
"condition": "status/100!=2"},
|
||||
schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=4"},
|
||||
schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=5"},
|
||||
schemas.AlertColumn.errors__4xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=4"},
|
||||
schemas.AlertColumn.errors__5xx__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(session_id)", "condition": "status/100=5"},
|
||||
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
|
||||
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
|
||||
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
|
||||
|
|
@@ -100,7 +102,7 @@ def can_check(a) -> bool:
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
-           and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
+           and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


 def Build(a):
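`can_check` gates an alert on two clocks: a renotify cooldown (`lastNotification` plus `renotifyInterval`) and a repetition schedule, where the modulo test passes only during the first minute of each repetition period. Restated in isolation as a sketch; the helper name is mine, the arithmetic is the hunk's:

```python
def due_this_minute(now_ms: int, created_at_ms: int, period_min: int) -> bool:
    """True once per repetition period: only within the first 60s window
    after a whole number of periods has elapsed since creation."""
    period_ms = period_min * 60 * 1000
    return (now_ms - created_at_ms) % period_ms < 60 * 1000


# Example: an alert created at t=0 with a 15-minute period is "due"
# during [0, 60s), [15min, 15min+60s), [30min, 30min+60s), and so on.
assert due_this_minute(0, 0, 15)
assert not due_this_minute(5 * 60 * 1000, 0, 15)
assert due_this_minute(15 * 60 * 1000 + 30_000, 0, 15)
```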
@@ -124,7 +126,7 @@ def Build(a):
        subQ = f"""SELECT {colDef["formula"]} AS value
                   FROM {colDef["table"]}
                   WHERE project_id = %(project_id)s
-                     {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
+                     {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
        j_s = colDef.get("joinSessions", True)
        main_table = colDef["table"]
        is_ss = main_table == "public.sessions"
@@ -147,8 +149,7 @@ def Build(a):
                  "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                  "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
    else:
-        sub1 = f"""{subQ} AND timestamp>=%(startDate)s
-                          AND timestamp<=%(now)s
+        sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                          {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
        params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
        sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
@@ -211,7 +212,7 @@ def process():
            cur = cur.recreate(rollback=True)
    if len(notifications) > 0:
        cur.execute(
-            cur.mogrify(f"""UPDATE public.Alerts
+            cur.mogrify(f"""UPDATE public.alerts
                             SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                             WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
    if len(notifications) > 0:
@@ -4,9 +4,10 @@ from decouple import config

 import schemas
 from chalicelib.core import alerts_listener, alerts_processor
-from chalicelib.core import sessions, alerts
+from chalicelib.core import alerts
 from chalicelib.utils import pg_client, ch_client, exp_ch_helper
 from chalicelib.utils.TimeUTC import TimeUTC
+from chalicelib.core import sessions_exp as sessions

 logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
@@ -135,7 +136,7 @@ def Build(a):
                   FROM {colDef["table"](now)}
                   WHERE project_id = %(project_id)s
                   {"AND event_type=%(event_type)s" if params["event_type"] else ""}
-                  {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
+                  {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""

    q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
@@ -198,9 +199,14 @@ def process():
            if alert["query"]["left"] != "CUSTOM":
                continue
        if alerts_processor.can_check(alert):
            logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
            query, params = Build(alert)
-            query = ch_cur.format(query, params)
+            try:
+                query = ch_cur.format(query, params)
+            except Exception as e:
+                logging.error(
+                    f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
+                logging.error(e)
+                continue
            logging.debug(alert)
            logging.debug(query)
            try:
@@ -13,11 +13,11 @@ from chalicelib.utils.TimeUTC import TimeUTC
 def __exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int]) -> bool:
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                FROM public.projects
-                                WHERE deleted_at IS NULL
-                                  AND name ILIKE %(name)s
-                                  AND tenant_id = %(tenant_id)s
-                                  {"AND project_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                FROM public.projects
+                                WHERE deleted_at IS NULL
+                                  AND name ILIKE %(name)s
+                                  AND tenant_id = %(tenant_id)s
+                                  {"AND project_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                            {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id})

        cur.execute(query=query)
@@ -12,11 +12,11 @@ from chalicelib.utils.TimeUTC import TimeUTC
 def __exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int]) -> bool:
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                FROM public.roles
-                                WHERE tenant_id = %(tenant_id)s
-                                  AND name ILIKE %(name)s
-                                  AND deleted_at ISNULL
-                                  {"role_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
+                                FROM public.roles
+                                WHERE tenant_id = %(tenant_id)s
+                                  AND name ILIKE %(name)s
+                                  AND deleted_at ISNULL
+                                  {"AND role_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                            {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id})
        cur.execute(query=query)
        row = cur.fetchone()
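Both fixes above are one-character SQL-building bugs: a stray `)` inside the `%(exclude_id)s` placeholder and a missing `AND` before an optional predicate. psycopg2-style placeholders follow Python's `%(name)s` formatting rules, so the malformed one fails before the query ever reaches the database, while the missing `AND` only fails at execution time. A toy illustration using plain Python %-formatting, no database required:

```python
params = {"exclude_id": 42}

# Well-formed placeholder expands cleanly:
ok = "AND project_id != %(exclude_id)s" % params
print(ok)  # -> AND project_id != 42

# The stray ")" makes ")" the conversion character, which is invalid:
try:
    "AND project_id != %(exclude_id))s" % params
except ValueError as e:
    print("malformed placeholder:", e)  # unsupported format character ')'
```

The missing `AND`, by contrast, produces a string that formats fine but yields syntactically broken SQL, so it only surfaces when the database parses the query.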
@@ -304,7 +304,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
    extra_col = ""
    extra_where = ""
    pre_query = ""
-    distinct_on="s.session_id"
+    distinct_on = "s.session_id"
    if metric_of == schemas.MetricOfTable.user_country:
        main_col = "user_country"
    elif metric_of == schemas.MetricOfTable.user_device:
@@ -324,7 +324,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
    elif metric_of == schemas.MetricOfTable.visited_url:
        main_col = "path"
        extra_col = ", path"
-        distinct_on+=",path"
+        distinct_on += ",path"
    main_query = cur.mogrify(f"""{pre_query}
                                 SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values
                                 FROM (SELECT {main_col} AS name,
@@ -1197,8 +1197,9 @@ def delete_sessions_by_user_ids(project_id, user_ids):

 def count_all():
    with pg_client.PostgresClient(unlimited_query=True) as cur:
-        row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
-        return row.get("count", 0)
+        cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
+        row = cur.fetchone()
+        return row.get("count", 0) if row else 0


 def session_exists(project_id, session_id):
@@ -1206,7 +1207,8 @@ def session_exists(project_id, session_id):
        query = cur.mogrify("""SELECT 1
                               FROM public.sessions
                               WHERE session_id=%(session_id)s
-                                 AND project_id=%(project_id)s""",
+                                 AND project_id=%(project_id)s
+                               LIMIT 1;""",
                            {"project_id": project_id, "session_id": session_id})
        cur.execute(query)
        row = cur.fetchone()
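The `count_all` change encodes a DB-API detail: `cursor.execute()` returns `None` in psycopg2, so the old `row = cur.execute(...)` could never hold a row; results must come from `fetchone()`/`fetchall()`. A minimal sketch of the corrected pattern, taking the connection as a parameter so nothing here depends on a live database:

```python
import psycopg2.extras


def count_all(conn) -> int:
    """DB-API 2.0: execute() returns None; rows come from fetchone()/fetchall()."""
    with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
        cur.execute("SELECT COUNT(session_id) AS count FROM public.sessions")
        row = cur.fetchone()  # e.g. {'count': 1234} with RealDictCursor
        return row.get("count", 0) if row else 0
```

The `if row else 0` guard also makes the function total: a missing row degrades to zero instead of an AttributeError.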
@@ -202,7 +202,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):

 # This function executes the query and return result
 def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
-                    error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
+                    error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):
    full_args, query_part = search_query_parts_ch(data=data, error_status=error_status, errors_only=errors_only,
                                                  favorite_only=data.bookmarked, issue=issue, project_id=project_id,
                                                  user_id=user_id)
@@ -264,6 +264,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
                                                GROUP BY user_id
                                            ) AS users_sessions;""",
                                   full_args)
+        elif ids_only:
+            main_query = cur.format(f"""SELECT DISTINCT ON(s.session_id) s.session_id
+                                        {query_part}
+                                        ORDER BY s.session_id desc
+                                        LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
+                                    full_args)
        else:
            if data.order is None:
                data.order = schemas.SortOrderType.desc.value
@@ -302,8 +308,8 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
            print(data.json())
            print("--------------------")
            raise err
-        if errors_only:
-            return helper.list_to_camel_case(cur.fetchall())
+        if errors_only or ids_only:
+            return helper.list_to_camel_case(sessions)

        if len(sessions) > 0:
            sessions = sessions[0]
@@ -1170,6 +1176,9 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                        ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\
                        """)
            event_index += 1
+            # limit THEN-events to 7 in CH because sequenceMatch cannot take more arguments
+            if event_index == 7 and data.events_order == schemas.SearchEventOrder._then:
+                break

    if event_index < 2:
        data.events_order = schemas.SearchEventOrder._or
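Per the new in-code comment, ClickHouse's `sequenceMatch` takes its conditions as a fixed argument list and cannot accept more past this point, so ordered ("THEN") searches are capped at 7 events while unordered (OR) searches are untouched. The guard sketched standalone; the constant and helper names are mine:

```python
MAX_CH_SEQUENCE_EVENTS = 7  # illustrative constant mirroring the hunk above


def cap_ordered_events(events: list, ordered: bool) -> list:
    """Keep at most MAX_CH_SEQUENCE_EVENTS conditions when they will be
    fed to ClickHouse sequenceMatch(); OR-style searches are unaffected."""
    if ordered and len(events) > MAX_CH_SEQUENCE_EVENTS:
        return events[:MAX_CH_SEQUENCE_EVENTS]
    return events


print(cap_ordered_events(list(range(10)), ordered=True))        # -> [0..6]
print(len(cap_ordered_events(list(range(10)), ordered=False)))  # -> 10
```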
@@ -1520,17 +1529,18 @@ def delete_sessions_by_user_ids(project_id, user_ids):


 def count_all():
-    with pg_client.PostgresClient(unlimited_query=True) as cur:
-        row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
+    with ch_client.ClickHouseClient() as cur:
+        row = cur.execute(query=f"SELECT COUNT(session_id) AS count FROM {exp_ch_helper.get_main_sessions_table()}")
    return row.get("count", 0)


 def session_exists(project_id, session_id):
    with ch_client.ClickHouseClient() as cur:
-        query = cur.format("""SELECT 1
-                              FROM public.sessions
-                              WHERE session_id=%(session_id)s
-                                AND project_id=%(project_id)s""",
+        query = cur.format(f"""SELECT 1
+                               FROM {exp_ch_helper.get_main_sessions_table()}
+                               WHERE session_id=%(session_id)s
+                                 AND project_id=%(project_id)s
+                               LIMIT 1""",
                           {"project_id": project_id, "session_id": session_id})
        row = cur.execute(query)
        return row is not None
@@ -173,7 +173,7 @@ def query_requests_by_period(project_id, start_time, end_time, filters: Optional
            if n == n_:
                data_['value'] = v[0]
                data_['oldValue'] = v[1]
-                data_['change'] = 100* v[2]
+                data_['change'] = 100 * v[2]
                data_['isNew'] = False
                break
        results.append(data_)
@@ -252,12 +252,12 @@ def query_most_errors_by_period(project_id, start_time, end_time,
    for n in common_errors:
        if n is None:
            continue
-        old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), names_idx)
-        if old_errors == 0:
+        sum_old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), sessions_idx)
+        if sum_old_errors == 0:
            continue
-        new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), names_idx)
+        sum_new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
        # error_increase[n] = (new_errors - old_errors) / old_errors
-        error_values[n] = new_errors, old_errors, (new_errors - old_errors) / old_errors
+        error_values[n] = sum_new_errors, sum_old_errors, (sum_new_errors - sum_old_errors) / sum_old_errors
    ratio = sorted(percentage_errors.items(), key=lambda k: k[1], reverse=True)
    increase = sorted(error_values.items(), key=lambda k: k[1][-1], reverse=True)
    names_ = set([k[0] for k in increase[:3] + ratio[:3]] + new_errors[:3])
@@ -347,18 +347,20 @@ def query_cpu_memory_by_period(project_id, start_time, end_time,
    output = list()
    if cpu_oldvalue is not None or cpu_newvalue is not None:
        output.append({'category': schemas_ee.InsightCategories.resources,
-                       'name': 'cpu',
-                       'value': cpu_newvalue,
-                       'oldValue': cpu_oldvalue,
-                       'change': 100 * (cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
-                       'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
+                       'name': 'cpu',
+                       'value': cpu_newvalue,
+                       'oldValue': cpu_oldvalue,
+                       'change': 100 * (
+                               cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
+                       'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
    if mem_oldvalue is not None or mem_newvalue is not None:
        output.append({'category': schemas_ee.InsightCategories.resources,
-                       'name': 'memory',
-                       'value': mem_newvalue,
-                       'oldValue': mem_oldvalue,
-                       'change': 100 * (mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
-                       'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})
+                       'name': 'memory',
+                       'value': mem_newvalue,
+                       'oldValue': mem_oldvalue,
+                       'change': 100 * (
+                               mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
+                       'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})
    return output
@@ -591,19 +591,6 @@ def set_password_invitation(tenant_id, user_id, new_password):
    }


-def count_members(tenant_id):
-    with pg_client.PostgresClient() as cur:
-        cur.execute(
-            cur.mogrify(
-                """SELECT
-                        COUNT(user_id)
-                   FROM public.users WHERE tenant_id = %(tenant_id)s AND deleted_at IS NULL;""",
-                {"tenant_id": tenant_id})
-        )
-        r = cur.fetchone()
-        return r["count"]
-
-
 def email_exists(email):
    with pg_client.PostgresClient() as cur:
        cur.execute(
@@ -116,12 +116,12 @@ def exists_by_name(tenant_id: int, name: str, exclude_id: Optional[int],
                   webhook_type: str = schemas.WebhookType.webhook) -> bool:
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                FROM public.webhooks
-                                WHERE name ILIKE %(name)s
-                                  AND deleted_at ISNULL
-                                  AND tenant_id=%(tenant_id)s
-                                  AND type=%(webhook_type)s
-                                  {"AND webhook_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                FROM public.webhooks
+                                WHERE name ILIKE %(name)s
+                                  AND deleted_at ISNULL
+                                  AND tenant_id=%(tenant_id)s
+                                  AND type=%(webhook_type)s
+                                  {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                            {"tenant_id": tenant_id, "name": name, "exclude_id": exclude_id,
                             "webhook_type": webhook_type})
        cur.execute(query)
@@ -19,7 +19,6 @@ func New(pg *cache.PGCache, cfg *db.Config) *Saver {
    var producer types.Producer = nil
    if cfg.UseQuickwit {
        producer = queue.NewProducer(cfg.MessageSizeLimit, true)
-        defer producer.Close(15000)
    }
    return &Saver{pg: pg, producer: producer, topic: cfg.QuickwitTopic}
 }
@@ -5,6 +5,8 @@ import (
    "errors"
    "fmt"
+    "log"
+    "openreplay/backend/pkg/metrics/database"
    "time"

    "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
 )
@@ -16,19 +18,23 @@ type Bulk interface {

 type bulkImpl struct {
    conn   driver.Conn
+    table  string
    query  string
    values [][]interface{}
 }

-func NewBulk(conn driver.Conn, query string) (Bulk, error) {
+func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
    switch {
    case conn == nil:
        return nil, errors.New("clickhouse connection is empty")
+    case table == "":
+        return nil, errors.New("table is empty")
    case query == "":
        return nil, errors.New("query is empty")
    }
    return &bulkImpl{
        conn:   conn,
+        table:  table,
        query:  query,
        values: make([][]interface{}, 0),
    }, nil
@@ -40,6 +46,7 @@ func (b *bulkImpl) Append(args ...interface{}) error {
 }

 func (b *bulkImpl) Send() error {
+    start := time.Now()
    batch, err := b.conn.PrepareBatch(context.Background(), b.query)
    if err != nil {
        return fmt.Errorf("can't create new batch: %s", err)
@@ -50,6 +57,11 @@ func (b *bulkImpl) Send() error {
            log.Printf("failed query: %s", b.query)
        }
    }
-    return batch.Send()
+    err = batch.Send()
+    // Save bulk metrics
+    database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
+    database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
+    // Prepare values slice for a new data
+    b.values = make([][]interface{}, 0)
+    return err
 }
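The Go hunk instruments each ClickHouse bulk with two labeled metrics: how many rows the bulk carried and how long the insert took, keyed by store ("ch") and destination table. The same pattern sketched in Python with hypothetical recorder stubs; the names mirror the Go helpers and are not a real client API:

```python
import time


def record_bulk_elements(n: float, db: str, table: str) -> None: ...
def record_bulk_insert_duration(ms: float, db: str, table: str) -> None: ...


def send_bulk(client, table: str, rows: list) -> None:
    """Send one bulk and record its size and latency, win or lose."""
    start = time.monotonic()
    try:
        client.insert(table, rows)  # stand-in for batch.Send()
    finally:
        # Label metrics per store and table so dashboards can break
        # down throughput and insert latency by destination.
        record_bulk_elements(float(len(rows)), "ch", table)
        record_bulk_insert_duration((time.monotonic() - start) * 1000, "ch", table)
        rows.clear()  # reuse the buffer for the next bulk
```

Recording in `finally` (like the Go code recording before returning `err`) keeps failed sends visible in the duration metric instead of silently dropping them.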
@@ -3,18 +3,16 @@ package clickhouse
 import (
    "errors"
    "fmt"
-    "github.com/ClickHouse/clickhouse-go/v2"
-    "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
    "log"
    "openreplay/backend/pkg/db/types"
    "openreplay/backend/pkg/hashid"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/url"
-    "os"
    "strings"
    "time"

+    "github.com/ClickHouse/clickhouse-go/v2"
+    "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
+
    "openreplay/backend/pkg/license"
 )

@@ -52,28 +50,14 @@ type connectorImpl struct {
    finished chan struct{}
 }

-// Check env variables. If not present, return default value.
-func getEnv(key, fallback string) string {
-    if value, ok := os.LookupEnv(key); ok {
-        return value
-    }
-    return fallback
-}
-
 func NewConnector(url string) Connector {
    license.CheckLicense()
-    // Check username, password, database
-    userName := getEnv("CH_USERNAME", "default")
-    password := getEnv("CH_PASSWORD", "")
-    database := getEnv("CH_DATABASE", "default")
    url = strings.TrimPrefix(url, "tcp://")
-    url = strings.TrimSuffix(url, "/"+database)
+    url = strings.TrimSuffix(url, "/default")
    conn, err := clickhouse.Open(&clickhouse.Options{
        Addr: []string{url},
        Auth: clickhouse.Auth{
-            Database: database,
-            Username: userName,
-            Password: password,
+            Database: "default",
        },
        MaxOpenConns: 20,
        MaxIdleConns: 15,

@@ -99,7 +83,7 @@ func NewConnector(url string) Connector {
 }

 func (c *connectorImpl) newBatch(name, query string) error {
-    batch, err := NewBulk(c.conn, query)
+    batch, err := NewBulk(c.conn, name, query)
    if err != nil {
        return fmt.Errorf("can't create new batch: %s", err)
    }
@@ -5,6 +5,148 @@ $$
 SELECT 'v1.10.0-ee'
 $$ LANGUAGE sql IMMUTABLE;

+-- Backup dashboard & search data:
+DO
+$$
+    BEGIN
+        IF NOT (SELECT EXISTS(SELECT schema_name
+                              FROM information_schema.schemata
+                              WHERE schema_name = 'backup_v1_10_0')) THEN
+            CREATE SCHEMA backup_v1_10_0;
+            CREATE TABLE backup_v1_10_0.dashboards
+            (
+                dashboard_id integer,
+                project_id   integer,
+                user_id      integer,
+                name         text      NOT NULL,
+                description  text      NOT NULL DEFAULT '',
+                is_public    boolean   NOT NULL DEFAULT TRUE,
+                is_pinned    boolean   NOT NULL DEFAULT FALSE,
+                created_at   timestamp NOT NULL DEFAULT timezone('utc'::text, now()),
+                deleted_at   timestamp NULL     DEFAULT NULL
+            );
+            CREATE TABLE backup_v1_10_0.dashboard_widgets
+            (
+                widget_id    integer,
+                dashboard_id integer,
+                metric_id    integer,
+                user_id      integer,
+                created_at   timestamp NOT NULL DEFAULT timezone('utc'::text, now()),
+                config       jsonb     NOT NULL DEFAULT '{}'::jsonb
+            );
+            CREATE TABLE backup_v1_10_0.searches
+            (
+                search_id  integer,
+                project_id integer,
+                user_id    integer,
+                name       text not null,
+                filter     jsonb not null,
+                created_at timestamp default timezone('utc'::text, now()) not null,
+                deleted_at timestamp,
+                is_public  boolean NOT NULL DEFAULT False
+            );
+            CREATE TABLE backup_v1_10_0.metrics
+            (
+                metric_id      integer,
+                project_id     integer,
+                user_id        integer,
+                name           text      NOT NULL,
+                is_public      boolean   NOT NULL DEFAULT FALSE,
+                active         boolean   NOT NULL DEFAULT TRUE,
+                created_at     timestamp NOT NULL DEFAULT timezone('utc'::text, now()),
+                deleted_at     timestamp,
+                edited_at      timestamp NOT NULL DEFAULT timezone('utc'::text, now()),
+                metric_type    text      NOT NULL,
+                view_type      text      NOT NULL,
+                metric_of      text      NOT NULL DEFAULT 'sessionCount',
+                metric_value   text[]    NOT NULL DEFAULT '{}'::text[],
+                metric_format  text,
+                category       text      NULL     DEFAULT 'custom',
+                is_pinned      boolean   NOT NULL DEFAULT FALSE,
+                is_predefined  boolean   NOT NULL DEFAULT FALSE,
+                is_template    boolean   NOT NULL DEFAULT FALSE,
+                predefined_key text      NULL     DEFAULT NULL,
+                default_config jsonb     NOT NULL
+            );
+            CREATE TABLE backup_v1_10_0.metric_series
+            (
+                series_id  integer generated BY DEFAULT AS IDENTITY PRIMARY KEY,
+                metric_id  integer REFERENCES metrics (metric_id) ON DELETE CASCADE,
+                index      integer   NOT NULL,
+                name       text      NULL,
+                filter     jsonb     NOT NULL,
+                created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL,
+                deleted_at timestamp
+            );
+
+            INSERT INTO backup_v1_10_0.dashboards(dashboard_id, project_id, user_id, name, description, is_public,
+                                                  is_pinned,
+                                                  created_at, deleted_at)
+            SELECT dashboard_id,
+                   project_id,
+                   user_id,
+                   name,
+                   description,
+                   is_public,
+                   is_pinned,
+                   created_at,
+                   deleted_at
+            FROM public.dashboards
+            ORDER BY dashboard_id;
+
+            INSERT INTO backup_v1_10_0.metrics(metric_id, project_id, user_id, name, is_public, active, created_at,
+                                               deleted_at, edited_at, metric_type, view_type, metric_of, metric_value,
+                                               metric_format, category, is_pinned, is_predefined, is_template,
+                                               predefined_key, default_config)
+            SELECT metric_id,
+                   project_id,
+                   user_id,
+                   name,
+                   is_public,
+                   active,
+                   created_at,
+                   deleted_at,
+                   edited_at,
+                   metric_type,
+                   view_type,
+                   metric_of,
+                   metric_value,
+                   metric_format,
+                   category,
+                   is_pinned,
+                   is_predefined,
+                   is_template,
+                   predefined_key,
+                   default_config
+            FROM public.metrics
+            ORDER BY metric_id;
+
+            INSERT INTO backup_v1_10_0.metric_series(series_id, metric_id, index, name, filter, created_at, deleted_at)
+            SELECT series_id, metric_id, index, name, filter, created_at, deleted_at
+            FROM public.metric_series
+            ORDER BY series_id;
+
+            INSERT INTO backup_v1_10_0.dashboard_widgets(widget_id, dashboard_id, metric_id, user_id, created_at, config)
+            SELECT widget_id, dashboard_id, metric_id, user_id, created_at, config
+            FROM public.dashboard_widgets
+            ORDER BY widget_id;
+
+            INSERT INTO backup_v1_10_0.searches(search_id, project_id, user_id, name, filter, created_at, deleted_at,
+                                                is_public)
+            SELECT search_id,
+                   project_id,
+                   user_id,
+                   name,
+                   filter,
+                   created_at,
+                   deleted_at,
+                   is_public
+            FROM public.searches
+            ORDER BY search_id;
+        END IF;
+    END
+$$ LANGUAGE plpgsql;

 CREATE TABLE IF NOT EXISTS frontend_signals
 (
     project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE,
@@ -36,6 +178,73 @@ ALTER TYPE webhook_type ADD VALUE IF NOT EXISTS 'msteams';
 UPDATE metrics
 SET is_public= TRUE;

+CREATE OR REPLACE FUNCTION get_global_key(key text)
+    RETURNS text AS
+$$
+DECLARE
+    events_map CONSTANT JSONB := '{
+      "SESSIONS": "sessions",
+      "sessionCount": "sessionCount",
+      "CLICK": "click",
+      "INPUT": "input",
+      "LOCATION": "location",
+      "CUSTOM": "custom",
+      "REQUEST": "request",
+      "FETCH": "fetch",
+      "GRAPHQL": "graphql",
+      "STATEACTION": "stateAction",
+      "ERROR": "error",
+      "CLICK_IOS": "clickIos",
+      "INPUT_IOS": "inputIos",
+      "VIEW_IOS": "viewIos",
+      "CUSTOM_IOS": "customIos",
+      "REQUEST_IOS": "requestIos",
+      "ERROR_IOS": "errorIos",
+      "DOM_COMPLETE": "domComplete",
+      "LARGEST_CONTENTFUL_PAINT_TIME": "largestContentfulPaintTime",
+      "TIME_BETWEEN_EVENTS": "timeBetweenEvents",
+      "TTFB": "ttfb",
+      "AVG_CPU_LOAD": "avgCpuLoad",
+      "AVG_MEMORY_USAGE": "avgMemoryUsage",
+      "FETCH_FAILED": "fetchFailed",
+      "FETCH_URL": "fetchUrl",
+      "FETCH_STATUS_CODE": "fetchStatusCode",
+      "FETCH_METHOD": "fetchMethod",
+      "FETCH_DURATION": "fetchDuration",
+      "FETCH_REQUEST_BODY": "fetchRequestBody",
+      "FETCH_RESPONSE_BODY": "fetchResponseBody",
+      "GRAPHQL_NAME": "graphqlName",
+      "GRAPHQL_METHOD": "graphqlMethod",
+      "GRAPHQL_REQUEST_BODY": "graphqlRequestBody",
+      "GRAPHQL_RESPONSE_BODY": "graphqlResponseBody",
+      "USEROS": "userOs",
+      "USERBROWSER": "userBrowser",
+      "USERDEVICE": "userDevice",
+      "USERCOUNTRY": "userCountry",
+      "USERID": "userId",
+      "USERANONYMOUSID": "userAnonymousId",
+      "REFERRER": "referrer",
+      "REVID": "revId",
+      "USEROS_IOS": "userOsIos",
+      "USERDEVICE_IOS": "userDeviceIos",
+      "USERCOUNTRY_IOS": "userCountryIos",
+      "USERID_IOS": "userIdIos",
+      "USERANONYMOUSID_IOS": "userAnonymousIdIos",
+      "REVID_IOS": "revIdIos",
+      "DURATION": "duration",
+      "PLATFORM": "platform",
+      "METADATA": "metadata",
+      "ISSUE": "issue",
+      "EVENTS_COUNT": "eventsCount",
+      "UTM_SOURCE": "utmSource",
+      "UTM_MEDIUM": "utmMedium",
+      "UTM_CAMPAIGN": "utmCampaign"
+    }';
+BEGIN
+    RETURN jsonb_extract_path(events_map, key);
+END;
+$$ LANGUAGE plpgsql IMMUTABLE;

 ALTER TABLE IF EXISTS metrics
     ALTER COLUMN metric_type TYPE text,
     ALTER COLUMN metric_type SET DEFAULT 'timeseries',
@@ -50,6 +259,11 @@ $$
              FROM information_schema.columns
              WHERE table_name = 'metrics'
                and column_name = 'is_predefined') THEN
+            -- 0. change metric_of
+            UPDATE metrics
+            SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''),
+                                   left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1))
+            WHERE not is_predefined;

            -- 1. pre transform structure
            ALTER TABLE IF EXISTS metrics
@@ -138,9 +352,8 @@ ALTER TABLE IF EXISTS projects
    ADD COLUMN IF NOT EXISTS beacon_size integer NOT NULL DEFAULT 0;

 -- To migrate saved search data
--- SET client_min_messages TO NOTICE;
-
+-- SET client_min_messages TO NOTICE;
+SET client_min_messages TO NOTICE;
 CREATE OR REPLACE FUNCTION get_new_event_key(key text)
    RETURNS text AS
 $$
@@ -326,9 +539,109 @@ $$
 $$
    LANGUAGE plpgsql;


+-- To migrate metric_series data
+DO
+$$
+    DECLARE
+        row               RECORD;
+        events_att        JSONB;
+        event_filters_att JSONB;
+        filters_att       JSONB;
+        element           JSONB;
+        s_element         JSONB;
+        new_value         TEXT;
+        new_events        JSONB[];
+        new_filters       JSONB[];
+        new_event_filters JSONB[];
+        changed           BOOLEAN;
+        planned_update    JSONB[];
+    BEGIN
+        planned_update := '{}'::jsonb[];
+        FOR row IN SELECT * FROM metric_series
+            LOOP
+                -- Transform events attributes
+                events_att := row.filter -> 'events';
+                IF events_att IS NOT NULL THEN
+                    new_events := '{}'::jsonb[];
+                    FOR element IN SELECT jsonb_array_elements(events_att)
+                        LOOP
+                            changed := FALSE;
+                            new_value := get_new_event_key(element ->> 'type');
+                            if new_value IS NOT NULL THEN
+                                changed := TRUE;
+                                new_value := replace(new_value, '"', '');
+                                element := element || jsonb_build_object('type', new_value);
+                            END IF;
+                            -- Transform event's sub-filters attributes
+                            event_filters_att := element -> 'filters';
+                            new_event_filters := '{}'::jsonb[];
+                            IF event_filters_att IS NOT NULL AND jsonb_array_length(event_filters_att) > 0 THEN
+                                FOR s_element IN SELECT jsonb_array_elements(event_filters_att)
+                                    LOOP
+                                        new_value := get_new_event_filter_key(s_element ->> 'type');
+                                        if new_value IS NOT NULL THEN
+                                            changed := TRUE;
+                                            new_value := replace(new_value, '"', '');
+                                            s_element := s_element || jsonb_build_object('type', new_value);
+                                            new_event_filters := array_append(new_event_filters, s_element);
+                                        END IF;
+                                    END LOOP;
+                                element := element || jsonb_build_object('filters', new_event_filters);
+                            END IF;
+                            IF changed THEN
+                                new_events := array_append(new_events, element);
+                            END IF;
+                        END LOOP;
+                    IF array_length(new_events, 1) > 0 THEN
+                        row.filter := row.filter || jsonb_build_object('events', new_events);
+                    END IF;
+                END IF;
+
+                -- Transform filters attributes
+                filters_att := row.filter -> 'filters';
+                IF filters_att IS NOT NULL THEN
+                    new_filters := '{}'::jsonb;
+                    FOR element IN SELECT jsonb_array_elements(filters_att)
+                        LOOP
+                            new_value := get_new_filter_key(element ->> 'type');
+                            if new_value IS NOT NULL THEN
+                                new_value := replace(new_value, '"', '');
+                                element := element || jsonb_build_object('type', new_value);
+                                new_filters := array_append(new_filters, element);
+                            END IF;
+                        END LOOP;
+                    IF array_length(new_filters, 1) > 0 THEN
+                        row.filter := row.filter || jsonb_build_object('filters', new_filters);
+                    END IF;
+                END IF;
+
+                IF array_length(new_events, 1) > 0 OR array_length(new_filters, 1) > 0 THEN
+                    planned_update := array_append(planned_update,
+                                                   jsonb_build_object('id', row.series_id, 'change', row.filter));
+                END IF;
+            END LOOP;
+
+        -- Update metric_series
+        IF array_length(planned_update, 1) > 0 THEN
+            raise notice 'must update % elements',array_length(planned_update, 1);
+
+            UPDATE metric_series
+            SET filter=changes.change -> 'change'
+            FROM (SELECT unnest(planned_update)) AS changes(change)
+            WHERE series_id = (changes.change -> 'id')::integer;
+            raise notice 'update done';
+        ELSE
+            raise notice 'nothing to update';
+        END IF;
+    END ;
+$$
+    LANGUAGE plpgsql;

 DROP FUNCTION get_new_filter_key;
 DROP FUNCTION get_new_event_filter_key;
 DROP FUNCTION get_new_event_key;
 DROP FUNCTION get_global_key;

 DROP TABLE IF EXISTS public.funnels;
 ALTER TABLE IF EXISTS public.metrics
@@ -747,6 +747,7 @@ $$
            metric_value   text[]  NOT NULL DEFAULT '{}'::text[],
            metric_format  text,
+            thumbnail      text,
            is_pinned      boolean NOT NULL DEFAULT FALSE,
            default_config jsonb   NOT NULL DEFAULT '{
              "col": 2,
              "row": 2,
1
ee/utilities/.gitignore
vendored
@@ -15,5 +15,4 @@ servers/sourcemaps-server.js
 /utils/helper.js
 /utils/assistHelper.js
 .local
-run-dev.sh
 *.mmdb
@@ -18,4 +18,4 @@ USER 1001
 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE

 ENTRYPOINT ["/sbin/tini", "--"]
-CMD npm start
+CMD npm start
1180
ee/utilities/package-lock.json
generated
Normal file
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
 {
-  "name": "utilities-server",
+  "name": "assist-server",
   "version": "1.0.0",
   "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
   "main": "peerjs-server.js",
6
ee/utilities/run-dev.sh
Executable file
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -a
+source .env
+set +a
+
+npm start
@@ -24,7 +24,7 @@ const {
 const {createAdapter} = require("@socket.io/redis-adapter");
 const {createClient} = require("redis");
 const wsRouter = express.Router();
-const REDIS_URL = process.env.REDIS_URL || "redis://localhost:6379";
+const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
 const pubClient = createClient({url: REDIS_URL});
 const subClient = pubClient.duplicate();
 console.log(`Using Redis: ${REDIS_URL}`);
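The new `REDIS_URL` line normalizes whatever is configured (a bare host, a `//host`, or any `scheme://host`) to a `redis://` URL in a single regex replace, so misconfigured schemes like `tcp://` no longer break the client. An equivalent, testable sketch in Python; the function name is mine and the regex mirrors the JS:

```python
import re


def normalize_redis_url(raw: str) -> str:
    """Force a redis:// scheme: replaces an existing scheme://, a bare //,
    or nothing at all at the start of the string."""
    return re.sub(r"^((\w+:)?//)?", "redis://", raw, count=1)


assert normalize_redis_url("localhost:6379") == "redis://localhost:6379"
assert normalize_redis_url("tcp://host:6379") == "redis://host:6379"
assert normalize_redis_url("//host:6379") == "redis://host:6379"
assert normalize_redis_url("redis://host:6379") == "redis://host:6379"
```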
@@ -309,7 +309,8 @@ module.exports = {
            debug && console.log(`notifying new agent about no SESSIONS`);
            io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
        }
-        await io.of('/').adapter.remoteJoin(socket.id, socket.peerId);
+        // await io.of('/').adapter.join(socket.id, socket.peerId);
+        await socket.join(socket.peerId);
        let rooms = await io.of('/').adapter.allRooms();
        if (rooms.has(socket.peerId)) {
            let connectedSockets = await io.in(socket.peerId).fetchSockets();
@@ -287,7 +287,7 @@ module.exports = {
            debug && console.log(`notifying new agent about no SESSIONS`);
            io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
        }
-        socket.join(socket.peerId);
+        await socket.join(socket.peerId);
        if (io.sockets.adapter.rooms.get(socket.peerId)) {
            debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`);
        }
@@ -22,5 +22,5 @@ MINIO_ACCESS_KEY = ''
 MINIO_SECRET_KEY = ''

 # APP and TRACKER VERSIONS
-VERSION = '1.9.0'
-TRACKER_VERSION = '4.1.9'
+VERSION = '1.10.0'
+TRACKER_VERSION = '5.0.0'
@@ -6,6 +6,7 @@ import AssistRouter from './AssistRouter';
 import { SideMenuitem } from 'UI';
 import { withSiteId, assist, recordings } from 'App/routes';
 import { connect } from 'react-redux';
+import { ENTERPRISE_REQUEIRED } from 'App/constants';

 interface Props extends RouteComponentProps {
   siteId: string;
@@ -40,7 +41,7 @@ function Assist(props: Props) {
            iconName="record-circle"
            onClick={() => redirect(recordings())}
            disabled={!isEnterprise}
-            tooltipTitle="This feature requires an enterprise license."
+            tooltipTitle={ENTERPRISE_REQUEIRED}
          />
        </div>
        <div className="side-menu-margined w-full">
@@ -13,7 +13,7 @@ interface Props {
 const IntegrationItem = (props: Props) => {
   const { integration, integrated, hide = false } = props;
   return hide ? <></> : (
-    <div className={cn(stl.wrapper, 'mb-4', { [stl.integrated]: integrated })} onClick={(e) => props.onClick(e)}>
+    <div className={cn(stl.wrapper, { [stl.integrated]: integrated })} onClick={(e) => props.onClick(e)}>
      {integrated && (
        <div className="m-2 absolute right-0 top-0 h-4 w-4 rounded-full bg-teal flex items-center justify-center">
          <Tooltip title="Integrated" delay={0}>
@@ -99,7 +99,7 @@ function Integrations(props: Props) {
              </div>
              <div className="">{cat.description}</div>

-              <div className="flex flex-wrap mt-4">
+              <div className="flex flex-wrap mt-4 gap-3">
                {cat.integrations.map((integration: any) => (
                  <React.Fragment key={integration.slug}>
                    <Tooltip
@@ -1,7 +1,6 @@
 .wrapper {
     border-radius: 3px;
     /* border: solid thin $gray-light-shade; */
-    margin-right: 10px;
     padding: 20px;
     cursor: pointer;
     width: 130px;
@@ -10,6 +10,7 @@ import { confirm } from 'UI';
 import { clearSearch } from 'Duck/search';
 import { clearSearch as clearSearchLive } from 'Duck/liveSearch';
 import { withStore } from 'App/mstore';
+import { toast } from 'react-toastify';

 @connect(
   (state) => ({
@@ -61,9 +62,14 @@ export default class NewSiteForm extends React.PureComponent {
      return this.setState({ existsError: true });
    }
    if (site.exists()) {
-      this.props.update(this.props.site, this.props.site.id).then(() => {
-        this.props.onClose(null);
-        this.props.fetchList();
+      this.props.update(this.props.site, this.props.site.id).then((response) => {
+        if (!response || !response.errors || response.errors.size === 0) {
+          this.props.onClose(null);
+          this.props.fetchList();
+          toast.success('Project updated successfully');
+        } else {
+          toast.error(response.errors[0]);
+        }
      });
    } else {
      this.props.save(this.props.site).then(() => {
@@ -1,75 +1,86 @@
 import React from 'react';
 import { Form, Button, Input } from 'UI';
 import styles from './webhookForm.module.css';
-import { useStore } from 'App/mstore'
-import { observer } from 'mobx-react-lite'
+import { useStore } from 'App/mstore';
+import { observer } from 'mobx-react-lite';
+import { toast } from 'react-toastify';

 function WebhookForm(props) {
-    const { settingsStore } = useStore()
-    const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore
-    const write = ({ target: { value, name } }) => editWebhook({ [name]: value });
+    const { settingsStore } = useStore();
+    const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore;
+    const write = ({ target: { value, name } }) => editWebhook({ [name]: value });

-    const save = () => {
-        saveWebhook(webhook).then(() => {
-            props.onClose();
-        });
-    };
+    const save = () => {
+        saveWebhook(webhook)
+            .then(() => {
+                props.onClose();
+            })
+            .catch((e) => {
+                const baseStr = 'Error saving webhook';
+                if (e.response) {
+                    e.response.json().then(({ errors }) => {
+                        toast.error(baseStr + ': ' + errors.join(','));
+                    });
+                } else {
+                    toast.error(baseStr);
+                }
+            });
+    };

-    return (
-        <div className="bg-white h-screen overflow-y-auto" style={{ width: '350px' }}>
-            <h3 className="p-5 text-2xl">{webhook.exists() ? 'Update' : 'Add'} Webhook</h3>
-            <Form className={styles.wrapper}>
-                <Form.Field>
-                    <label>{'Name'}</label>
-                    <Input
-                        name="name"
-                        value={webhook.name}
-                        onChange={write}
-                        placeholder="Name"
-                        maxLength={50}
-                    />
-                </Form.Field>
+    return (
+        <div className="bg-white h-screen overflow-y-auto" style={{ width: '350px' }}>
+            <h3 className="p-5 text-2xl">{webhook.exists() ? 'Update' : 'Add'} Webhook</h3>
+            <Form className={styles.wrapper}>
+                <Form.Field>
+                    <label>{'Name'}</label>
+                    <Input
+                        name="name"
+                        value={webhook.name}
+                        onChange={write}
+                        placeholder="Name"
+                        maxLength={50}
+                    />
+                </Form.Field>
+                <Form.Field>
+                    <label>{'Endpoint'}</label>
+                    <Input name="endpoint" value={webhook.endpoint} onChange={write} placeholder="Endpoint" />
+                </Form.Field>

-                <Form.Field>
-                    <label>{'Endpoint'}</label>
-                    <Input
-                        name="endpoint"
-                        value={webhook.endpoint}
-                        onChange={write}
-                        placeholder="Endpoint"
-                    />
-                </Form.Field>
-                <Form.Field>
-                    <label>{'Auth Header (optional)'}</label>
-                    <Input
-                        name="authHeader"
-                        value={webhook.authHeader}
-                        onChange={write}
-                        placeholder="Auth Header"
-                    />
-                </Form.Field>
+                <Form.Field>
+                    <label>{'Auth Header (optional)'}</label>
+                    <Input
+                        name="authHeader"
+                        value={webhook.authHeader}
+                        onChange={write}
+                        placeholder="Auth Header"
+                    />
+                </Form.Field>

-                <div className="flex justify-between">
-                    <div className="flex items-center">
-                        <Button
-                            onClick={save}
-                            disabled={!webhook.validate()}
-                            loading={loading}
-                            variant="primary"
-                            className="float-left mr-2"
-                        >
-                            {webhook.exists() ? 'Update' : 'Add'}
-                        </Button>
-                        {webhook.exists() && <Button onClick={props.onClose}>{'Cancel'}</Button>}
-                    </div>
-                    {webhook.exists() &&
-                        <Button icon="trash" variant="text" onClick={() => props.onDelete(webhook.webhookId)}></Button>}
-                </div>
-            </Form>
-        </div>
-    );
+                <div className="flex justify-between">
+                    <div className="flex items-center">
+                        <Button
+                            onClick={save}
+                            disabled={!webhook.validate()}
+                            loading={loading}
+                            variant="primary"
+                            className="float-left mr-2"
+                        >
+                            {webhook.exists() ? 'Update' : 'Add'}
+                        </Button>
+                        {webhook.exists() && <Button onClick={props.onClose}>{'Cancel'}</Button>}
+                    </div>
+                    {webhook.exists() && (
+                        <Button
+                            icon="trash"
+                            variant="text"
+                            onClick={() => props.onDelete(webhook.webhookId)}
+                        ></Button>
+                    )}
+                </div>
+            </Form>
+        </div>
+    );
 }

 export default observer(WebhookForm);
@@ -2,6 +2,7 @@ import React from 'react';
 import { Input } from 'UI';
 import Select from 'Shared/Select';
 import { alertConditions as conditions } from 'App/constants';
+import Alert from 'Types/alert'

 const thresholdOptions = [
   { label: '15 minutes', value: 15 },
@@ -25,6 +26,7 @@ interface ICondition {
   writeQuery: (data: any) => void;
   writeQueryOption: (e: any, data: any) => void;
   unit: any;
+  changeUnit: (value: string) => void;
 }

 function Condition({
@@ -35,6 +37,7 @@ function Condition({
   writeQueryOption,
   writeQuery,
   unit,
+  changeUnit,
 }: ICondition) {
   return (
     <div>
@@ -47,7 +50,7 @@ function Condition({
           options={changeOptions}
           name="change"
           defaultValue={instance.change}
-          onChange={({ value }) => writeOption(null, { name: 'change', value })}
+          onChange={({ value }) => changeUnit(value)}
           id="change-dropdown"
         />
       </div>
@@ -8,6 +8,7 @@ import { DateTime } from 'luxon';
 import { withRouter, RouteComponentProps } from 'react-router-dom';
 import cn from 'classnames';
 import Alert from 'Types/alert';
+import { observer } from 'mobx-react-lite'

 const getThreshold = (threshold: number) => {
   if (threshold === 15) return '15 Minutes';
@@ -42,9 +43,8 @@ const getNotifyChannel = (alert: Record<string, any>, webhooks: Array<any>) => {
       ' (' +
       alert.msteamsInput
         .map((channelId: number) => {
-          return (
-            webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams')?.name
-          );
+          return webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams')
+            ?.name;
         })
         .join(', ') +
       ')'
@@ -58,7 +58,7 @@ const getNotifyChannel = (alert: Record<string, any>, webhooks: Array<any>) => {
     }
   }
   if (alert.msteams) {
-    str += (str === '' ? '' : ' and ') + 'MS Teams'
+    str += (str === '' ? '' : ' and ') + 'MS Teams';
     if (alert.msteamsInput.length > 0) {
       str += getMsTeamsChannels();
     }
@@ -79,10 +79,11 @@ interface Props extends RouteComponentProps {
   init: (alert: Alert) => void;
   demo?: boolean;
   webhooks: Array<any>;
+  triggerOptions: Record<string, any>;
 }

 function AlertListItem(props: Props) {
-  const { alert, siteId, history, init, demo, webhooks } = props;
+  const { alert, siteId, history, init, demo, webhooks, triggerOptions } = props;

   if (!alert) {
     return null;
@@ -95,6 +96,11 @@ function AlertListItem(props: Props) {
     history.push(path);
   };

+  const formTriggerName = () =>
+    Number.isInteger(alert.query.left) && triggerOptions
+      ? triggerOptions.find((opt: { value: any, label: string }) => opt.value === alert.query.left).label
+      : alert.query.left;
+
   return (
     <div
       className={cn('px-6', !demo ? 'hover:bg-active-blue cursor-pointer border-t' : '')}
@@ -118,29 +124,36 @@ function AlertListItem(props: Props) {
            {demo
              ? DateTime.fromMillis(+new Date()).toFormat('LLL dd, yyyy, hh:mm a')
              : checkForRecent(
-                  DateTime.fromMillis(alert.createdAt || +new Date()),
-                  'LLL dd, yyyy, hh:mm a'
-                )}
+                  DateTime.fromMillis(alert.createdAt || +new Date()),
+                  'LLL dd, yyyy, hh:mm a'
+                )}
          </div>
        </div>
        <div className="color-gray-medium px-2 pb-2">
          {'When the '}
-          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>{alert.detectionMethod}</span>
+          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>
+            {alert.detectionMethod}
+          </span>
          {' of '}
-          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>{alert.seriesName}</span>
+          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>
+            {triggerOptions ? formTriggerName() : alert.seriesName}
+          </span>
          {' is '}
          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>
            {alert.query.operator}
-            {numberWithCommas(alert.query.right)} {alert.metric?.unit}
+            {numberWithCommas(alert.query.right)}
+            {alert.change === 'percent' ? '%' : alert.metric?.unit}
          </span>
          {' over the past '}
-          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>{getThreshold(
-            alert.currentPeriod)}</span>
+          <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas' }}>
+            {getThreshold(alert.currentPeriod)}
+          </span>
          {alert.detectionMethod === 'change' ? (
            <>
              {' compared to the previous '}
-              <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas ' }}>{getThreshold(
-                alert.previousPeriod)}</span>
+              <span className="font-semibold" style={{ fontFamily: 'Menlo, Monaco, Consolas ' }}>
+                {getThreshold(alert.previousPeriod)}
+              </span>
            </>
          ) : null}
          {', notify me on '}
@@ -153,4 +166,4 @@ function AlertListItem(props: Props) {
   );
 }

-export default withRouter(AlertListItem);
+export default withRouter(observer(AlertListItem));
@@ -17,10 +17,10 @@ function AlertsList({ siteId }: Props) {
   const { alertsStore, settingsStore } = useStore();
   const { fetchWebhooks, webhooks } = settingsStore
   const { alerts: alertsList, alertsSearch, fetchList, init } = alertsStore
+  const page = alertsStore.page;

   React.useEffect(() => { fetchList(); fetchWebhooks() }, []);
   const alertsArray = alertsList
-  const [page, setPage] = React.useState(1);

   const filteredAlerts = filterList(alertsArray, alertsSearch, ['name'], (item, query) => query.test(item.query.left))
   const list = alertsSearch !== '' ? filteredAlerts : alertsArray;
@@ -59,7 +59,7 @@ function AlertsList({ siteId }: Props) {
        <Pagination
          page={page}
          totalPages={Math.ceil(list.length / pageSize)}
-          onPageChange={(page) => setPage(page)}
+          onPageChange={(page) => alertsStore.updateKey('page', page)}
          limit={pageSize}
          debounceRequest={100}
        />
@@ -1,16 +1,30 @@
-import React from 'react';
+import React, { useEffect } from 'react';
 import { Button, PageTitle, Icon, Link } from 'UI';
 import withPageTitle from 'HOCs/withPageTitle';
 import { withSiteId, alertCreate } from 'App/routes';

 import AlertsList from './AlertsList';
 import AlertsSearch from './AlertsSearch';
+import { useHistory } from 'react-router';
+import { useStore } from 'App/mstore';

 interface IAlertsView {
   siteId: string;
 }

 function AlertsView({ siteId }: IAlertsView) {
+  const history = useHistory();
+  const { alertsStore } = useStore();
+
+
+  useEffect(() => {
+    const unmount = history.listen((location) => {
+      if (!location.pathname.includes('/alert')) {
+        alertsStore.updateKey('page', 1);
+      }
+    });
+    return unmount;
+  }, [history]);
   return (
     <div style={{ maxWidth: '1300px', margin: 'auto'}} className="bg-white rounded py-4 border">
       <div className="flex items-center mb-4 justify-between px-6">
@@ -167,6 +167,10 @@ const NewAlert = (props: IProps) => {
     edit({ query: { ...query, [name]: value } });
   };

+  const changeUnit = (value: string) => {
+    alertsStore.changeUnit(value)
+  }
+
   const writeQuery = ({ target: { value, name } }: React.ChangeEvent<HTMLInputElement>) => {
     const { query } = instance;
     edit({ query: { ...query, [name]: value } });
@@ -243,6 +247,7 @@ const NewAlert = (props: IProps) => {
             instance={instance}
             triggerOptions={triggerOptions}
             writeQueryOption={writeQueryOption}
+            changeUnit={changeUnit}
             writeQuery={writeQuery}
             unit={unit}
           />
@@ -278,7 +283,13 @@ const NewAlert = (props: IProps) => {

       <div className="bg-white mt-4 border rounded mb-10">
         {instance && (
-          <AlertListItem alert={instance} demo siteId="" init={() => null} webhooks={webhooks} />
+          <AlertListItem
+            alert={instance}
+            triggerOptions={triggerOptions}
+            demo
+            siteId=""
+            init={() => null}
+            webhooks={webhooks} />
         )}
       </div>
     </>
@@ -1,6 +1,7 @@
 import React from 'react';
 import { ItemMenu } from 'UI';
 import { connect } from 'react-redux';
+import { ENTERPRISE_REQUEIRED } from 'App/constants';

 interface Props {
   editHandler: (isTitle: boolean) => void;
@@ -16,7 +17,7 @@ function DashboardOptions(props: Props) {
     { icon: 'text-paragraph', text: `${!isTitlePresent ? 'Add' : 'Edit'} Description`, onClick: () => editHandler(false) },
     { icon: 'users', text: 'Visibility & Access', onClick: editHandler },
     { icon: 'trash', text: 'Delete', onClick: deleteHandler },
-    { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: 'This feature requires an enterprise license.' }
+    { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: ENTERPRISE_REQUEIRED }
   ]

   return (
@@ -2,6 +2,7 @@ import { IconNames } from 'App/components/ui/SVG';
 import React from 'react';
 import { Icon, Tooltip } from 'UI';
 import cn from 'classnames';
+import { ENTERPRISE_REQUEIRED } from 'App/constants';

 export interface MetricType {
   title: string;
@@ -23,7 +24,7 @@ function MetricTypeItem(props: Props) {
     onClick = () => {},
   } = props;
   return (
-    <Tooltip disabled={!disabled} title="This feature requires an enterprise license." delay={0}>
+    <Tooltip disabled={!disabled} title={ENTERPRISE_REQUEIRED} delay={0}>
       <div
         className={cn(
           'rounded color-gray-darkest flex items-start border border-transparent p-4 hover:bg-active-blue cursor-pointer group hover-color-teal',
@@ -7,6 +7,7 @@ import { withRouter, RouteComponentProps } from 'react-router-dom';
 import { dashboardMetricCreate, withSiteId } from 'App/routes';
 import { useStore } from 'App/mstore';
 import { connect } from 'react-redux';
+import { ENTERPRISE_REQUEIRED } from 'App/constants';

 interface Props extends RouteComponentProps {
   dashboardId: number;
@@ -24,7 +25,7 @@ function MetricTypeList(props: Props) {
       return {
         ...metric,
         disabled: metric.slug === INSIGHTS && !isEnterprise,
-        tooltipTitle: disabled ? 'This feature requires an enterprise license.' : '',
+        tooltipTitle: disabled ? ENTERPRISE_REQUEIRED : '',
       };
     });
   }, []);
@@ -21,7 +21,7 @@ function MetricsList({

   const dashboard = dashboardStore.selectedDashboard;
   const existingCardIds = useMemo(() => dashboard?.widgets?.map(i => parseInt(i.metricId)), [dashboard]);
-  const cards = useMemo(() => metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))), [metricStore.filteredCards]);
+  const cards = useMemo(() => !!onSelectionChange ? metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))) : metricStore.filteredCards, [metricStore.filteredCards]);

   useEffect(() => {
     metricStore.fetchList();
Some files were not shown because too many files have changed in this diff