Merge remote-tracking branch 'origin/api-v1.11.0' into dev
commit 19872ee11d
90 changed files with 1785 additions and 1736 deletions

.github/workflows/assist-ee.yaml (14 changes, vendored)
@@ -6,10 +6,10 @@ on:
       - dev
       - api-*
     paths:
-      - "ee/utilities/**"
-      - "utilities/**"
-      - "!utilities/.gitignore"
-      - "!utilities/*-dev.sh"
+      - "ee/assist/**"
+      - "assist/**"
+      - "!assist/.gitignore"
+      - "!assist/*-dev.sh"
 
 name: Build and Deploy Assist EE
@@ -44,7 +44,7 @@ jobs:
         ENVIRONMENT: staging
       run: |
         skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-        cd utilities
+        cd assist
         PUSH_IMAGE=0 bash -x ./build.sh ee
         [[ "x$skip_security_checks" == "xtrue" ]] || {
           curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
@@ -101,9 +101,9 @@ jobs:
 
         cat /tmp/image_override.yaml
         # Deploy command
-        mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
+        mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp
         rm -rf openreplay/charts/*
-        mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
+        mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

.github/workflows/assist.yaml (12 changes, vendored)
@@ -6,9 +6,9 @@ on:
       - dev
       - api-*
     paths:
-      - "utilities/**"
-      - "!utilities/.gitignore"
-      - "!utilities/*-dev.sh"
+      - "assist/**"
+      - "!assist/.gitignore"
+      - "!assist/*-dev.sh"
 
 name: Build and Deploy Assist
@@ -43,7 +43,7 @@ jobs:
         ENVIRONMENT: staging
       run: |
         skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-        cd utilities
+        cd assist
         PUSH_IMAGE=0 bash -x ./build.sh
         [[ "x$skip_security_checks" == "xtrue" ]] || {
           curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
@@ -100,9 +100,9 @@ jobs:
 
         cat /tmp/image_override.yaml
         # Deploy command
-        mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
+        mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp
         rm -rf openreplay/charts/*
-        mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
+        mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}

.github/workflows/peers-ee.yaml (93 changes, vendored)
@@ -1,6 +1,11 @@
 # This action will push the peers changes to aws
 on:
   workflow_dispatch:
+    inputs:
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev
@@ -11,7 +16,7 @@ on:
       - "!peers/.gitignore"
       - "!peers/*-dev.sh"
 
-name: Build and Deploy Peers
+name: Build and Deploy Peers EE
 
 jobs:
   deploy:
@@ -36,30 +41,98 @@ jobs:
         kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
       id: setcontext
 
-    - name: Building and Pushing api image
+    # Caching docker images
+    - uses: satackey/action-docker-layer-caching@v0.0.11
+      # Ignore the failure of a step and avoid terminating the job.
+      continue-on-error: true
+
+
+    - name: Building and Pushing peers image
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
         IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         cd peers
-        PUSH_IMAGE=1 bash build.sh ee
+        PUSH_IMAGE=0 bash -x ./build.sh ee
+        [[ "x$skip_security_checks" == "xtrue" ]] || {
+          curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+          images=("peers")
+          for image in ${images[*]};do
+            ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+          done
+          err_code=$?
+          [[ $err_code -ne 0 ]] && {
+            exit $err_code
+          }
+        } && {
+          echo "Skipping Security Checks"
+        }
+        images=("peers")
+        for image in ${images[*]};do
+          docker push $DOCKER_REPO/$image:$IMAGE_TAG
+        done
+    - name: Creating old image input
+      run: |
+        #
+        # Create yaml with existing image tags
+        #
+        kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
+        tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
+
+        echo > /tmp/image_override.yaml
+
+        for line in `cat /tmp/image_tag.txt`;
+        do
+            image_array=($(echo "$line" | tr ':' '\n'))
+            cat <<EOF >> /tmp/image_override.yaml
+        ${image_array[0]}:
+          image:
+            # We've to strip off the -ee, as helm will append it.
+            tag: `echo ${image_array[1]} | cut -d '-' -f 1`
+        EOF
+        done
+
     - name: Deploy to kubernetes
       run: |
         cd scripts/helmcharts/
-        sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml
-        sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
-        sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
-        sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml
-        sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
-        sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
-        bash kube-install.sh --app peers
 
+        ## Update secerts
+        sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+        sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
+        sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+        sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
+        sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
+        sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
+        sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
+
+        # Update changed image tag
+        sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+
+        cat /tmp/image_override.yaml
+        # Deploy command
+        mv openreplay/charts/{ingress-nginx,peers,quickwit} /tmp
+        rm -rf openreplay/charts/*
+        mv /tmp/{ingress-nginx,peers,quickwit} openreplay/charts/
+        helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
         IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
 
     - name: Alert slack
       if: ${{ failure() }}
       uses: rtCamp/action-slack-notify@v2
       env:
         SLACK_CHANNEL: ee
         SLACK_TITLE: "Failed ${{ github.workflow }}"
         SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
         SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
         SLACK_USERNAME: "OR Bot"
         SLACK_MESSAGE: 'Build failed :bomb:'
 
     # - name: Debug Job
     #   if: ${{ failure() }}
     #   uses: mxschmitt/action-tmate@v3

.github/workflows/peers.yaml (89 changes, vendored)
@@ -1,6 +1,11 @@
 # This action will push the peers changes to aws
 on:
   workflow_dispatch:
+    inputs:
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev
@@ -35,30 +40,96 @@ jobs:
         kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
       id: setcontext
 
-    - name: Building and Pushing api image
+    # Caching docker images
+    - uses: satackey/action-docker-layer-caching@v0.0.11
+      # Ignore the failure of a step and avoid terminating the job.
+      continue-on-error: true
+
+
+    - name: Building and Pushing peers image
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
         IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         cd peers
-        PUSH_IMAGE=1 bash build.sh
+        PUSH_IMAGE=0 bash -x ./build.sh
+        [[ "x$skip_security_checks" == "xtrue" ]] || {
+          curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+          images=("peers")
+          for image in ${images[*]};do
+            ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+          done
+          err_code=$?
+          [[ $err_code -ne 0 ]] && {
+            exit $err_code
+          }
+        } && {
+          echo "Skipping Security Checks"
+        }
+        images=("peers")
+        for image in ${images[*]};do
+          docker push $DOCKER_REPO/$image:$IMAGE_TAG
+        done
+    - name: Creating old image input
+      run: |
+        #
+        # Create yaml with existing image tags
+        #
+        kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
+        tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
+
+        echo > /tmp/image_override.yaml
+
+        for line in `cat /tmp/image_tag.txt`;
+        do
+            image_array=($(echo "$line" | tr ':' '\n'))
+            cat <<EOF >> /tmp/image_override.yaml
+        ${image_array[0]}:
+          image:
+            tag: ${image_array[1]}
+        EOF
+        done
+
     - name: Deploy to kubernetes
       run: |
         cd scripts/helmcharts/
 
+        ## Update secerts
         sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
-        sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
-        sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
-        sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
-        sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
-        sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
-        bash kube-install.sh --app peers
+        sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
+        sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+        sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
+        sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
+        sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
+
+        # Update changed image tag
+        sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+
+        cat /tmp/image_override.yaml
+        # Deploy command
+        mv openreplay/charts/{ingress-nginx,peers,quickwit} /tmp
+        rm -rf openreplay/charts/*
+        mv /tmp/{ingress-nginx,peers,quickwit} openreplay/charts/
+        helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
         IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
 
     - name: Alert slack
       if: ${{ failure() }}
       uses: rtCamp/action-slack-notify@v2
       env:
         SLACK_CHANNEL: foss
         SLACK_TITLE: "Failed ${{ github.workflow }}"
         SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
         SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
         SLACK_USERNAME: "OR Bot"
         SLACK_MESSAGE: 'Build failed :bomb:'
 
     # - name: Debug Job
     #   if: ${{ failure() }}
     #   uses: mxschmitt/action-tmate@v3
@@ -66,4 +137,4 @@ jobs:
     #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
     #     IMAGE_TAG: ${{ github.sha }}
     #     ENVIRONMENT: staging
     #

.github/workflows/sourcemaps-reader.yaml (8 changes, vendored)
@@ -1,4 +1,4 @@
-# This action will push the chalice changes to aws
+# This action will push the sourcemapreader changes to aws
 on:
   workflow_dispatch:
   push:
@@ -83,13 +83,13 @@ jobs:
         sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
 
         # Update changed image tag
-        sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+        sed -i "/sourcemapreader/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
 
         cat /tmp/image_override.yaml
         # Deploy command
-        mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
+        mv openreplay/charts/{ingress-nginx,sourcemapreader,quickwit} /tmp
         rm -rf openreplay/charts/*
-        mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
+        mv /tmp/{ingress-nginx,sourcemapreader,quickwit} openreplay/charts/
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}

api/app.py (82 changes)
@@ -1,4 +1,5 @@
 import logging
+from contextlib import asynccontextmanager
 
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
@@ -12,9 +13,42 @@ from chalicelib.utils import pg_client
 from routers import core, core_dynamic
 from routers.crons import core_crons
 from routers.crons import core_dynamic_crons
-from routers.subs import insights, metrics, v1_api
+from routers.subs import insights, metrics, v1_api, health
 
-app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
 loglevel = config("LOGLEVEL", default=logging.INFO)
 print(f">Loglevel set to: {loglevel}")
 logging.basicConfig(level=loglevel)
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # Startup
+    logging.info(">>>>> starting up <<<<<")
+    ap_logger = logging.getLogger('apscheduler')
+    ap_logger.setLevel(loglevel)
+
+    app.schedule = AsyncIOScheduler()
+    await pg_client.init()
+    app.schedule.start()
+
+    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
+        app.schedule.add_job(id=job["func"].__name__, **job)
+
+    ap_logger.info(">Scheduled jobs:")
+    for job in app.schedule.get_jobs():
+        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+
+    # App listening
+    yield
+
+    # Shutdown
+    logging.info(">>>>> shutting down <<<<<")
+    app.schedule.shutdown(wait=False)
+    await pg_client.terminate()
+
+
+app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""),
+              lifespan=lifespan)
 app.add_middleware(GZipMiddleware, minimum_size=1000)
 
 
@@ -51,39 +85,13 @@ app.include_router(core_dynamic.app_apikey)
 app.include_router(metrics.app)
 app.include_router(insights.app)
 app.include_router(v1_api.app_apikey)
+app.include_router(health.public_app)
+app.include_router(health.app)
+app.include_router(health.app_apikey)
 
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-app.schedule = AsyncIOScheduler()
-
-
-@app.on_event("startup")
-async def startup():
-    logging.info(">>>>> starting up <<<<<")
-    await pg_client.init()
-    app.schedule.start()
-
-    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
-        app.schedule.add_job(id=job["func"].__name__, **job)
-
-    ap_logger.info(">Scheduled jobs:")
-    for job in app.schedule.get_jobs():
-        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
-
-
-@app.on_event("shutdown")
-async def shutdown():
-    logging.info(">>>>> shutting down <<<<<")
-    app.schedule.shutdown(wait=False)
-    await pg_client.terminate()
-
-
-@app.get('/private/shutdown', tags=["private"])
-async def stop_server():
-    logging.info("Requested shutdown")
-    await shutdown()
-    import os, signal
-    os.kill(1, signal.SIGTERM)
+# @app.get('/private/shutdown', tags=["private"])
+# async def stop_server():
+#     logging.info("Requested shutdown")
+#     await shutdown()
+#     import os, signal
+#     os.kill(1, signal.SIGTERM)
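
The api/app.py rewrite above moves startup and shutdown out of the deprecated @app.on_event hooks and into a single lifespan async context manager (the same merge bumps fastapi to 0.94.1/0.95.0; lifespan support landed in FastAPI 0.93). A minimal, self-contained sketch of the pattern — the names here are illustrative, not OpenReplay's:

    from contextlib import asynccontextmanager
    from fastapi import FastAPI

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Startup: acquire long-lived resources (DB pool, scheduler, ...)
        app.state.ready = True
        yield                      # the app serves requests while parked here
        # Shutdown: release resources in reverse order
        app.state.ready = False

    app = FastAPI(lifespan=lifespan)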

@@ -1,33 +1,17 @@
 import logging
+from contextlib import asynccontextmanager
 
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
 from fastapi import FastAPI
+from chalicelib.utils import pg_client
 
 from chalicelib.core import alerts_processor
 
-app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
-logging.info("============= ALERTS =============")
-from chalicelib.utils import pg_client
-
-
-@app.get("/")
-async def root():
-    return {"status": "Running"}
-
-
-app.schedule = AsyncIOScheduler()
-
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-app.schedule = AsyncIOScheduler()
 
 
-@app.on_event("startup")
-async def startup():
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # Startup
     logging.info(">>>>> starting up <<<<<")
     await pg_client.init()
     app.schedule.start()
@@ -39,24 +23,44 @@ async def startup():
     for job in app.schedule.get_jobs():
         ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
 
+    # App listening
+    yield
 
-@app.on_event("shutdown")
-async def shutdown():
+    # Shutdown
     logging.info(">>>>> shutting down <<<<<")
     app.schedule.shutdown(wait=False)
     await pg_client.terminate()
 
 
-@app.get('/private/shutdown', tags=["private"])
-async def stop_server():
-    logging.info("Requested shutdown")
-    await shutdown()
-    import os, signal
-    os.kill(1, signal.SIGTERM)
+app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""),
+              lifespan=lifespan)
+logging.info("============= ALERTS =============")
+
+
+@app.get("/")
+async def root():
+    return {"status": "Running"}
+
+
+@app.get("/health")
+async def get_health_status():
+    return {"data": {
+        "health": True,
+        "details": {"version": config("version_number", default="unknown")}
+    }}
+
+
+app.schedule = AsyncIOScheduler()
+
+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+ap_logger = logging.getLogger('apscheduler')
+ap_logger.setLevel(loglevel)
+app.schedule = AsyncIOScheduler()
+
+if config("LOCAL_DEV", default=False, cast=bool):
-    @app.get('/private/trigger', tags=["private"])
+    @app.get('/trigger', tags=["private"])
     async def trigger_main_cron():
         logging.info("Triggering main cron")
         alerts_processor.process()
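
A note on the alerts-app change just above: because the @app.get('/trigger') decorator sits inside the if config("LOCAL_DEV", ...) block, the route is only ever registered when LOCAL_DEV is truthy — conditional route registration at import time, not a per-request check. A tiny sketch of the same gate, using python-decouple's bool casting:

    from decouple import config  # reads env vars / .env files

    if config("LOCAL_DEV", default=False, cast=bool):
        # dev-only endpoints get registered here
        print("LOCAL_DEV routes enabled")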

@@ -116,7 +116,7 @@ def process_notifications(data):
     BATCH_SIZE = 200
     for t in full.keys():
         for i in range(0, len(full[t]), BATCH_SIZE):
-            notifications_list = full[t][i:i + BATCH_SIZE]
+            notifications_list = full[t][i:min(i + BATCH_SIZE, len(full[t]))]
            if notifications_list is None or len(notifications_list) == 0:
                break
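
One detail on the batching tweak above: Python slices already clamp their upper bound to the sequence length, so the old and new expressions yield identical batches — the explicit min() only spells the boundary out. For instance:

    full_t = list(range(5))
    BATCH_SIZE = 3
    assert full_t[3:3 + BATCH_SIZE] == full_t[3:min(3 + BATCH_SIZE, len(full_t))] == [3, 4]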

api/chalicelib/core/health.py (new file, 172 lines)
@@ -0,0 +1,172 @@
+from urllib.parse import urlparse
+
+import redis
+import requests
+from decouple import config
+
+from chalicelib.utils import pg_client
+
+if config("LOCAL_DEV", cast=bool, default=False):
+    HEALTH_ENDPOINTS = {
+        "alerts": "http://127.0.0.1:8888/metrics",
+        "assets": "http://127.0.0.1:8888/metrics",
+        "assist": "http://127.0.0.1:8888/metrics",
+        "chalice": "http://127.0.0.1:8888/metrics",
+        "db": "http://127.0.0.1:8888/metrics",
+        "ender": "http://127.0.0.1:8888/metrics",
+        "heuristics": "http://127.0.0.1:8888/metrics",
+        "http": "http://127.0.0.1:8888/metrics",
+        "ingress-nginx": "http://127.0.0.1:8888/metrics",
+        "integrations": "http://127.0.0.1:8888/metrics",
+        "peers": "http://127.0.0.1:8888/metrics",
+        "quickwit": "http://127.0.0.1:8888/metrics",
+        "sink": "http://127.0.0.1:8888/metrics",
+        "sourcemapreader": "http://127.0.0.1:8888/metrics",
+        "storage": "http://127.0.0.1:8888/metrics",
+        "utilities": "http://127.0.0.1:8888/metrics"
+    }
+
+else:
+    HEALTH_ENDPOINTS = {
+        "alerts": "http://alerts-openreplay.app.svc.cluster.local:8888/health",
+        "assets": "http://assets-openreplay.app.svc.cluster.local:8888/metrics",
+        "assist": "http://assist-openreplay.app.svc.cluster.local:8888/health",
+        "chalice": "http://chalice-openreplay.app.svc.cluster.local:8888/metrics",
+        "db": "http://db-openreplay.app.svc.cluster.local:8888/metrics",
+        "ender": "http://ender-openreplay.app.svc.cluster.local:8888/metrics",
+        "heuristics": "http://heuristics-openreplay.app.svc.cluster.local:8888/metrics",
+        "http": "http://http-openreplay.app.svc.cluster.local:8888/metrics",
+        "ingress-nginx": "http://ingress-nginx-openreplay.app.svc.cluster.local:8888/metrics",
+        "integrations": "http://integrations-openreplay.app.svc.cluster.local:8888/metrics",
+        "peers": "http://peers-openreplay.app.svc.cluster.local:8888/health",
+        "sink": "http://sink-openreplay.app.svc.cluster.local:8888/metrics",
+        "sourcemapreader": "http://sourcemapreader-openreplay.app.svc.cluster.local:8888/health",
+        "storage": "http://storage-openreplay.app.svc.cluster.local:8888/metrics",
+    }
+
+
+def __check_database_pg():
+    with pg_client.PostgresClient() as cur:
+        cur.execute("SHOW server_version;")
+        server_version = cur.fetchone()
+        cur.execute("SELECT openreplay_version() AS version;")
+        schema_version = cur.fetchone()
+    return {
+        "health": True,
+        "details": {
+            "version": server_version["server_version"],
+            "schema": schema_version["version"]
+        }
+    }
+
+
+def __not_supported():
+    return {"errors": ["not supported"]}
+
+
+def __always_healthy():
+    return {
+        "health": True,
+        "details": {}
+    }
+
+
+def __always_healthy_with_version():
+    return {
+        "health": True,
+        "details": {"version": config("version_number", default="unknown")}
+    }
+
+
+def __check_be_service(service_name):
+    def fn():
+        fail_response = {
+            "health": False,
+            "details": {
+                "errors": ["server health-check failed"]
+            }
+        }
+        try:
+            results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
+            if results.status_code != 200:
+                print(f"!! issue with the storage-health code:{results.status_code}")
+                print(results.text)
+                fail_response["details"]["errors"].append(results.text)
+                return fail_response
+        except requests.exceptions.Timeout:
+            print(f"!! Timeout getting {service_name}-health")
+            fail_response["details"]["errors"].append("timeout")
+            return fail_response
+        except Exception as e:
+            print("!! Issue getting storage-health response")
+            print(str(e))
+            try:
+                print(results.text)
+                fail_response["details"]["errors"].append(results.text)
+            except:
+                print("couldn't get response")
+            fail_response["details"]["errors"].append(str(e))
+            return fail_response
+        return {
+            "health": True,
+            "details": {}
+        }
+
+    return fn
+
+
+def __check_redis():
+    fail_response = {
+        "health": False,
+        "details": {"errors": ["server health-check failed"]}
+    }
+    if config("REDIS_STRING", default=None) is None:
+        fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
+        return fail_response
+
+    try:
+        u = urlparse(config("REDIS_STRING"))
+        r = redis.Redis(host=u.hostname, port=u.port, socket_timeout=2)
+        r.ping()
+    except Exception as e:
+        print("!! Issue getting redis-health response")
+        print(str(e))
+        fail_response["details"]["errors"].append(str(e))
+        return fail_response
+
+    return {
+        "health": True,
+        "details": {"version": r.execute_command('INFO')['redis_version']}
+    }
+
+
+def get_health():
+    health_map = {
+        "databases": {
+            "postgres": __check_database_pg
+        },
+        "ingestionPipeline": {
+            "redis": __check_redis
+        },
+        "backendServices": {
+            "alerts": __check_be_service("alerts"),
+            "assets": __check_be_service("assets"),
+            "assist": __check_be_service("assist"),
+            "chalice": __always_healthy_with_version,
+            "db": __check_be_service("db"),
+            "ender": __check_be_service("ender"),
+            "frontend": __always_healthy,
+            "heuristics": __check_be_service("heuristics"),
+            "http": __check_be_service("http"),
+            "ingress-nginx": __always_healthy,
+            "integrations": __check_be_service("integrations"),
+            "peers": __check_be_service("peers"),
+            "sink": __check_be_service("sink"),
+            "sourcemapreader": __check_be_service("sourcemapreader"),
+            "storage": __check_be_service("storage")
+        }
+    }
+    for parent_key in health_map.keys():
+        for element_key in health_map[parent_key]:
+            health_map[parent_key][element_key] = health_map[parent_key][element_key]()
+    return health_map
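
In the new health module, __check_be_service is a factory: it returns a closure so that get_health can first assemble the whole health_map of unevaluated probes, then invoke them all in one pass at the end. The same pattern in a hedged, standalone sketch (the URL and names are illustrative, not from the codebase):

    import requests

    def make_probe(url):
        def probe():
            # Evaluated only when the caller finally invokes it.
            try:
                return {"health": requests.get(url, timeout=2).status_code == 200}
            except requests.RequestException as e:
                return {"health": False, "details": {"errors": [str(e)]}}
        return probe

    probes = {"svc-a": make_probe("http://127.0.0.1:8888/health")}
    results = {name: fn() for name, fn in probes.items()}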

@@ -1,10 +1,7 @@
 from typing import List
 
 import schemas
-from chalicelib.core import events, metadata, events_ios, \
-    sessions_mobs, issues, projects, resources, assist, performance_event, sessions_favorite, \
-    sessions_devtool, sessions_notes
-from chalicelib.utils import errors_helper
+from chalicelib.core import events, metadata, projects, performance_event, sessions_favorite
 from chalicelib.utils import pg_client, helper, metrics_helper
 from chalicelib.utils import sql_helper as sh
 
@@ -33,89 +30,6 @@ COALESCE((SELECT TRUE
                    AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """
 
 
-def __group_metadata(session, project_metadata):
-    meta = {}
-    for m in project_metadata.keys():
-        if project_metadata[m] is not None and session.get(m) is not None:
-            meta[project_metadata[m]] = session[m]
-        session.pop(m)
-    return meta
-
-
-def get_by_id2_pg(project_id, session_id, context: schemas.CurrentContext, full_data=False, include_fav_viewed=False,
-                  group_metadata=False, live=True):
-    with pg_client.PostgresClient() as cur:
-        extra_query = []
-        if include_fav_viewed:
-            extra_query.append("""COALESCE((SELECT TRUE
-                                            FROM public.user_favorite_sessions AS fs
-                                            WHERE s.session_id = fs.session_id
-                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
-            extra_query.append("""COALESCE((SELECT TRUE
-                                            FROM public.user_viewed_sessions AS fs
-                                            WHERE s.session_id = fs.session_id
-                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
-        query = cur.mogrify(
-            f"""\
-            SELECT
-                s.*,
-                s.session_id::text AS session_id,
-                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
-                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
-                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
-            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
-            WHERE s.project_id = %(project_id)s
-                AND s.session_id = %(session_id)s;""",
-            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
-        )
-        # print("===============")
-        # print(query)
-        cur.execute(query=query)
-
-        data = cur.fetchone()
-        if data is not None:
-            data = helper.dict_to_camel_case(data)
-            if full_data:
-                if data["platform"] == 'ios':
-                    data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
-                    for e in data['events']:
-                        if e["type"].endswith("_IOS"):
-                            e["type"] = e["type"][:-len("_IOS")]
-                    data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
-                    data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
-                                                                             session_id=session_id)
-                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
-                else:
-                    data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
-                                                              group_clickrage=True)
-                    all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
-                    data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
-                    # to keep only the first stack
-                    # limit the number of errors to reduce the response-body size
-                    data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
-                                      if e['source'] == "js_exception"][:500]
-                    data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
-                                                                          session_id=session_id)
-                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
-                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
-                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id)
-                    data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
-                                                                    start_ts=data["startTs"], duration=data["duration"])
-
-                data['notes'] = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=project_id,
-                                                                 session_id=session_id, user_id=context.user_id)
-                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
-                data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
-                data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
-                                                       project_key=data["projectKey"])
-                data["inDB"] = True
-                return data
-        elif live:
-            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
-        else:
-            return None
-
-
 # This function executes the query and return result
 def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
                     error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):

@@ -1,5 +1,4 @@
 import schemas
-from chalicelib.core import sessions
 from chalicelib.utils import pg_client
 
 
@@ -8,11 +7,14 @@ def add_favorite_session(context: schemas.CurrentContext, project_id, session_id
         cur.execute(
             cur.mogrify(f"""\
                         INSERT INTO public.user_favorite_sessions(user_id, session_id)
-                        VALUES (%(userId)s,%(session_id)s);""",
+                        VALUES (%(userId)s,%(session_id)s)
+                        RETURNING session_id;""",
                         {"userId": context.user_id, "session_id": session_id})
         )
-    return sessions.get_by_id2_pg(context=context, project_id=project_id, session_id=session_id,
-                                  full_data=False, include_fav_viewed=True)
+        row = cur.fetchone()
+    if row:
+        return {"data": {"sessionId": session_id}}
+    return {"errors": ["something went wrong"]}
 
 
 def remove_favorite_session(context: schemas.CurrentContext, project_id, session_id):
@@ -21,11 +23,14 @@ def remove_favorite_session(context: schemas.CurrentContext, project_id, session
             cur.mogrify(f"""\
                         DELETE FROM public.user_favorite_sessions
                         WHERE user_id = %(userId)s
-                          AND session_id = %(session_id)s;""",
+                          AND session_id = %(session_id)s
+                        RETURNING session_id;""",
                         {"userId": context.user_id, "session_id": session_id})
         )
-    return sessions.get_by_id2_pg(context=context, project_id=project_id, session_id=session_id,
-                                  full_data=False, include_fav_viewed=True)
+        row = cur.fetchone()
+    if row:
+        return {"data": {"sessionId": session_id}}
+    return {"errors": ["something went wrong"]}
 
 
 def favorite_session(context: schemas.CurrentContext, project_id, session_id):
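
The RETURNING clause added above lets each call confirm straight from the cursor that a row was really inserted or deleted, replacing the old pattern of re-reading the whole session afterwards. A standalone illustration of the idea (sqlite3 here purely to make the demo runnable — RETURNING needs SQLite ≥ 3.35, and the real code goes through psycopg2 against PostgreSQL):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE user_favorite_sessions(user_id INTEGER, session_id INTEGER)")
    cur = con.execute("INSERT INTO user_favorite_sessions(user_id, session_id) "
                      "VALUES (?, ?) RETURNING session_id", (1, 42))
    print(cur.fetchone())  # (42,) -- a row comes back only if the insert happened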

api/chalicelib/core/sessions_replay.py (new file, 186 lines)
@@ -0,0 +1,186 @@
+import schemas
+from chalicelib.core import events, metadata, events_ios, \
+    sessions_mobs, issues, resources, assist, sessions_devtool, sessions_notes
+from chalicelib.utils import errors_helper
+from chalicelib.utils import pg_client, helper
+
+
+def __group_metadata(session, project_metadata):
+    meta = {}
+    for m in project_metadata.keys():
+        if project_metadata[m] is not None and session.get(m) is not None:
+            meta[project_metadata[m]] = session[m]
+        session.pop(m)
+    return meta
+
+
+# for backward compatibility
+def get_by_id2_pg(project_id, session_id, context: schemas.CurrentContext, full_data=False, include_fav_viewed=False,
+                  group_metadata=False, live=True):
+    with pg_client.PostgresClient() as cur:
+        extra_query = []
+        if include_fav_viewed:
+            extra_query.append("""COALESCE((SELECT TRUE
+                                            FROM public.user_favorite_sessions AS fs
+                                            WHERE s.session_id = fs.session_id
+                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
+            extra_query.append("""COALESCE((SELECT TRUE
+                                            FROM public.user_viewed_sessions AS fs
+                                            WHERE s.session_id = fs.session_id
+                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
+        query = cur.mogrify(
+            f"""\
+            SELECT
+                s.*,
+                s.session_id::text AS session_id,
+                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
+                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
+                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
+            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
+            WHERE s.project_id = %(project_id)s
+                AND s.session_id = %(session_id)s;""",
+            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
+        )
+        # print("===============")
+        # print(query)
+        cur.execute(query=query)
+
+        data = cur.fetchone()
+        if data is not None:
+            data = helper.dict_to_camel_case(data)
+            if full_data:
+                if data["platform"] == 'ios':
+                    data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
+                    for e in data['events']:
+                        if e["type"].endswith("_IOS"):
+                            e["type"] = e["type"][:-len("_IOS")]
+                    data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
+                    data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
+                                                                             session_id=session_id)
+                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
+                else:
+                    data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
+                                                              group_clickrage=True)
+                    all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
+                    data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
+                    # to keep only the first stack
+                    # limit the number of errors to reduce the response-body size
+                    data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
+                                      if e['source'] == "js_exception"][:500]
+                    data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
+                                                                          session_id=session_id)
+                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
+                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
+                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id)
+                    data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
+                                                                    start_ts=data["startTs"], duration=data["duration"])
+
+                data['notes'] = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=project_id,
+                                                                 session_id=session_id, user_id=context.user_id)
+                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
+                data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
+                data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
+                                                       project_key=data["projectKey"])
+                data["inDB"] = True
+                return data
+        elif live:
+            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
+        else:
+            return None
+
+
+def get_replay(project_id, session_id, context: schemas.CurrentContext, full_data=False, include_fav_viewed=False,
+               group_metadata=False, live=True):
+    with pg_client.PostgresClient() as cur:
+        extra_query = []
+        if include_fav_viewed:
+            extra_query.append("""COALESCE((SELECT TRUE
+                                            FROM public.user_favorite_sessions AS fs
+                                            WHERE s.session_id = fs.session_id
+                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
+            extra_query.append("""COALESCE((SELECT TRUE
+                                            FROM public.user_viewed_sessions AS fs
+                                            WHERE s.session_id = fs.session_id
+                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
+        query = cur.mogrify(
+            f"""\
+            SELECT
+                s.*,
+                s.session_id::text AS session_id,
+                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
+                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
+                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
+            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
+            WHERE s.project_id = %(project_id)s
+                AND s.session_id = %(session_id)s;""",
+            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
+        )
+        # print("===============")
+        # print(query)
+        cur.execute(query=query)
+
+        data = cur.fetchone()
+        if data is not None:
+            data = helper.dict_to_camel_case(data)
+            if full_data:
+                if data["platform"] == 'ios':
+                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
+                else:
+                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
+                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
+                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id)
+
+                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
+                data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
+                                                       project_key=data["projectKey"])
+                data["inDB"] = True
+            return data
+        elif live:
+            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
+        else:
+            return None
+
+
+def get_events(project_id, session_id):
+    with pg_client.PostgresClient() as cur:
+        query = cur.mogrify(
+            f"""SELECT session_id, platform, start_ts, duration
+                FROM public.sessions AS s
+                WHERE s.project_id = %(project_id)s
+                  AND s.session_id = %(session_id)s;""",
+            {"project_id": project_id, "session_id": session_id}
+        )
+        # print("===============")
+        # print(query)
+        cur.execute(query=query)
+
+        s_data = cur.fetchone()
+        if s_data is not None:
+            s_data = helper.dict_to_camel_case(s_data)
+            data = {}
+            if s_data["platform"] == 'ios':
+                data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
+                for e in data['events']:
+                    if e["type"].endswith("_IOS"):
+                        e["type"] = e["type"][:-len("_IOS")]
+                data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
+                data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
+                                                                         session_id=session_id)
+            else:
+                data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
+                                                          group_clickrage=True)
+                all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
+                data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
+                # to keep only the first stack
+                # limit the number of errors to reduce the response-body size
+                data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
+                                  if e['source'] == "js_exception"][:500]
+                data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
+                                                                      session_id=session_id)
+                data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
+                                                                start_ts=s_data["startTs"], duration=s_data["duration"])
+
+            data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
+            return data
+        else:
+            return None
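
sessions_replay.py carries __group_metadata over unchanged from the removed sessions.py code: it renames a session's raw metadata_N columns to the project's configured labels and strips the raw keys. A runnable restatement of the same logic (the leading underscores are dropped here so it imports cleanly; the data values are made up):

    def group_metadata(session, project_metadata):
        meta = {}
        for m in project_metadata.keys():
            if project_metadata[m] is not None and session.get(m) is not None:
                meta[project_metadata[m]] = session[m]
            session.pop(m)
        return meta

    s = {"metadata_1": "42", "metadata_2": None, "duration": 1000}
    print(group_metadata(s, {"metadata_1": "userPlan", "metadata_2": None}))  # {'userPlan': '42'}
    print(s)  # {'duration': 1000} -- raw metadata keys are stripped in place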

@@ -68,7 +68,7 @@ def update(tenant_id, user_id, data: schemas.UpdateTenantSchema):
     return edit_client(tenant_id=tenant_id, changes=changes)
 
 
-def tenants_exists():
-    with pg_client.PostgresClient() as cur:
+def tenants_exists(use_pool=True):
+    with pg_client.PostgresClient(use_pool=use_pool) as cur:
         cur.execute(f"SELECT EXISTS(SELECT 1 FROM public.tenants)")
         return cur.fetchone()["exists"]

@@ -87,9 +87,10 @@ class PostgresClient:
     long_query = False
     unlimited_query = False
 
-    def __init__(self, long_query=False, unlimited_query=False):
+    def __init__(self, long_query=False, unlimited_query=False, use_pool=True):
         self.long_query = long_query
         self.unlimited_query = unlimited_query
+        self.use_pool = use_pool
         if unlimited_query:
             long_config = dict(_PG_CONFIG)
             long_config["application_name"] += "-UNLIMITED"

@@ -100,7 +101,7 @@ class PostgresClient:
             long_config["options"] = f"-c statement_timeout=" \
                                      f"{config('pg_long_timeout', cast=int, default=5 * 60) * 1000}"
             self.connection = psycopg2.connect(**long_config)
-        elif not config('PG_POOL', cast=bool, default=True):
+        elif not use_pool or not config('PG_POOL', cast=bool, default=True):
             single_config = dict(_PG_CONFIG)
             single_config["application_name"] += "-NOPOOL"
             single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"

@@ -120,11 +121,12 @@ class PostgresClient:
         try:
             self.connection.commit()
             self.cursor.close()
-            if self.long_query or self.unlimited_query:
+            if not self.use_pool or self.long_query or self.unlimited_query:
                 self.connection.close()
         except Exception as error:
             logging.error("Error while committing/closing PG-connection", error)
             if str(error) == "connection already closed" \
+                    and self.use_pool \
                     and not self.long_query \
                     and not self.unlimited_query \
                     and config('PG_POOL', cast=bool, default=True):

@@ -134,6 +136,7 @@ class PostgresClient:
             raise error
         finally:
             if config('PG_POOL', cast=bool, default=True) \
+                    and self.use_pool \
                     and not self.long_query \
                     and not self.unlimited_query:
                 postgreSQL_pool.putconn(self.connection)
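
The pg_client changes above add a use_pool escape hatch: passing use_pool=False forces a dedicated psycopg2 connection (closed on exit instead of returned to the pool) even when PG_POOL is enabled. The new health router relies on it, presumably because it queries the tenants table at import time, before the pool is guaranteed to be serviceable. Usage, as enabled by this change:

    with pg_client.PostgresClient(use_pool=False) as cur:
        cur.execute("SELECT 1")   # runs on a dedicated, non-pooled connection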

@@ -1,3 +1,3 @@
 #!/bin/sh
 
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload --proxy-headers
+uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --proxy-headers

@@ -1,3 +1,3 @@
 #!/bin/sh
 export ASSIST_KEY=ignore
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
+uvicorn app:app --host 0.0.0.0 --port 8888

@@ -52,4 +52,4 @@ PRESIGNED_URL_EXPIRATION=3600
 ASSIST_JWT_EXPIRATION=144000
 ASSIST_JWT_SECRET=
 PYTHONUNBUFFERED=1
-THUMBNAILS_BUCKET=thumbnails
+REDIS_STRING=redis://redis-master.db.svc.cluster.local:6379

@@ -8,7 +8,7 @@ jira==3.4.1
 
 
 
-fastapi==0.92.0
+fastapi==0.94.1
 uvicorn[standard]==0.20.0
 python-decouple==3.7
 pydantic[email]==1.10.4

@@ -8,8 +8,10 @@ jira==3.4.1
 
 
 
-fastapi==0.92.0
+fastapi==0.95.0
 uvicorn[standard]==0.20.0
 python-decouple==3.7
 pydantic[email]==1.10.4
 apscheduler==3.10.0
+
+redis==4.5.1

@@ -6,7 +6,7 @@ from starlette.responses import RedirectResponse, FileResponse
 
 import schemas
 from chalicelib.core import sessions, errors, errors_viewed, errors_favorite, sessions_assignments, heatmaps, \
-    sessions_favorite, assist, sessions_notes, click_maps
+    sessions_favorite, assist, sessions_notes, click_maps, sessions_replay
 from chalicelib.core import sessions_viewed
 from chalicelib.core import tenants, users, projects, license
 from chalicelib.core import webhook
@@ -145,13 +145,14 @@ async def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
                                            stack_integrations=True)}
 
 
-@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
+# for backward compatibility
+@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions", "replay"])
 async def get_session(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
                       context: schemas.CurrentContext = Depends(OR_context)):
     if isinstance(sessionId, str):
         return {"errors": ["session not found"]}
-    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
-                                  include_fav_viewed=True, group_metadata=True, context=context)
+    data = sessions_replay.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
+                                         include_fav_viewed=True, group_metadata=True, context=context)
     if data is None:
         return {"errors": ["session not found"]}
     if data.get("inDB"):
@@ -162,6 +163,37 @@ async def get_session(projectId: int, sessionId: Union[int, str], background_tas
     }
 
 
+@app.get('/{projectId}/sessions/{sessionId}/replay', tags=["sessions", "replay"])
+async def get_session_events(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
+                             context: schemas.CurrentContext = Depends(OR_context)):
+    if isinstance(sessionId, str):
+        return {"errors": ["session not found"]}
+    data = sessions_replay.get_replay(project_id=projectId, session_id=sessionId, full_data=True,
+                                      include_fav_viewed=True, group_metadata=True, context=context)
+    if data is None:
+        return {"errors": ["session not found"]}
+    if data.get("inDB"):
+        background_tasks.add_task(sessions_viewed.view_session, project_id=projectId, user_id=context.user_id,
+                                  session_id=sessionId)
+    return {
+        'data': data
+    }
+
+
+@app.get('/{projectId}/sessions/{sessionId}/events', tags=["sessions", "replay"])
+async def get_session_events(projectId: int, sessionId: Union[int, str],
+                             context: schemas.CurrentContext = Depends(OR_context)):
+    if isinstance(sessionId, str):
+        return {"errors": ["session not found"]}
+    data = sessions_replay.get_events(project_id=projectId, session_id=sessionId)
+    if data is None:
+        return {"errors": ["session not found"]}
+
+    return {
+        'data': data
+    }
+
+
 @app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
 async def get_error_trace(projectId: int, sessionId: int, errorId: str,
                           context: schemas.CurrentContext = Depends(OR_context)):

@@ -239,8 +271,8 @@ async def get_live_session(projectId: int, sessionId: str, background_tasks: Bac
                            context: schemas.CurrentContext = Depends(OR_context)):
     data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
     if data is None:
-        data = sessions.get_by_id2_pg(context=context, project_id=projectId, session_id=sessionId,
-                                      full_data=True, include_fav_viewed=True, group_metadata=True, live=False)
+        data = sessions_replay.get_replay(context=context, project_id=projectId, session_id=sessionId,
+                                          full_data=True, include_fav_viewed=True, group_metadata=True, live=False)
     if data is None:
         return {"errors": ["session not found"]}
     if data.get("inDB"):
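
With these router changes, session retrieval is split in two: /sessions/{sessionId}/replay returns the player payload while /sessions/{sessionId}/events returns events, errors and issues, and the old /sessions/{sessionId} route stays for backward compatibility. (Both new handlers happen to share the Python name get_session_events; FastAPI binds routes at decoration time, so both still work — the second def merely shadows the first at module level.) A hypothetical client fetch against the new pair — host, project, session and token below are placeholders, not values from this merge:

    import requests

    base = "https://openreplay.example.com/api/1/sessions/42"
    headers = {"Authorization": "Bearer <jwt>"}
    replay = requests.get(f"{base}/replay", headers=headers).json()  # player payload: URLs, metadata, live flag
    events = requests.get(f"{base}/events", headers=headers).json()  # events, errors, issues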

api/routers/subs/health.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+from chalicelib.core import health, tenants
+from routers.base import get_routers
+
+public_app, app, app_apikey = get_routers()
+
+health_router = public_app
+
+if tenants.tenants_exists(use_pool=False):
+    health_router = app
+
+
+@health_router.get('/health', tags=["health-check"])
+def get_global_health_status():
+    return {"data": health.get_health()}
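
A quirk worth noting in this new router: the tenant check runs once at import time — hence tenants_exists(use_pool=False) on a dedicated connection — and decides whether /health is mounted on public_app or on app (by their names, the unauthenticated and authenticated routers from get_routers). The choice is therefore fixed at process start and not re-evaluated per request.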

@@ -1,3 +1,3 @@
 #!/bin/zsh
 
-uvicorn app_alerts:app --reload
+uvicorn app_alerts:app --reload --port 8888

utilities/.gitignore → assist/.gitignore (renamed, 0 changes, vendored)

@@ -18,4 +18,4 @@ USER 1001
 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE
 
 ENTRYPOINT ["/sbin/tini", "--"]
-CMD npm start
+CMD npm start
\ No newline at end of file

@@ -35,20 +35,20 @@ update_helm_release() {
 }
 
 function build_api(){
-    destination="_utilities"
+    destination="_assist"
     [[ $1 == "ee" ]] && {
-        destination="_utilities_ee"
+        destination="_assist_ee"
     }
-    cp -R ../utilities ../${destination}
+    cp -R ../assist ../${destination}
     cd ../${destination}
 
     # Copy enterprise code
     [[ $1 == "ee" ]] && {
-        cp -rf ../ee/utilities/* ./
+        cp -rf ../ee/assist/* ./
     }
     docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist:${image_tag} .
 
-    cd ../utilities
+    cd ../assist
     rm -rf ../${destination}
     [[ $PUSH_IMAGE -eq 1 ]] && {
         docker push ${DOCKER_REPO:-'local'}/assist:${image_tag}

@@ -45,9 +45,9 @@
             }
         },
         "node_modules/@types/node": {
-            "version": "18.14.1",
-            "resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.1.tgz",
-            "integrity": "sha512-QH+37Qds3E0eDlReeboBxfHbX9omAcBCXEzswCu6jySP642jiM3cYSIkU/REqwhCUqXdonHFuBfJDiAJxMNhaQ=="
+            "version": "18.14.6",
+            "resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.6.tgz",
+            "integrity": "sha512-93+VvleD3mXwlLI/xASjw0FzKcwzl3OdTCzm1LaRfqgS21gfFtK3zDXM5Op9TeeMsJVOaJ2VRDpT9q4Y3d0AvA=="
         },
         "node_modules/accepts": {
             "version": "1.3.8",
@@ -987,9 +987,9 @@
             }
         },
         "node_modules/ua-parser-js": {
-            "version": "1.0.33",
-            "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.33.tgz",
-            "integrity": "sha512-RqshF7TPTE0XLYAqmjlu5cLLuGdKrNu9O1KLA/qp39QtbZwuzwv1dT46DZSopoUMsYgXpB3Cv8a03FI8b74oFQ==",
+            "version": "1.0.34",
+            "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.34.tgz",
+            "integrity": "sha512-K9mwJm/DaB6mRLZfw6q8IMXipcrmuT6yfhYmwhAkuh+81sChuYstYA+znlgaflUPaYUa3odxKPKGw6Vw/lANew==",
             "funding": [
                 {
                     "type": "opencollective",

@@ -1,6 +1,6 @@
 {
     "name": "assist-server",
-    "version": "1.0.0",
+    "version": "v1.11.0",
     "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
     "main": "peerjs-server.js",
     "scripts": {

@@ -2,6 +2,7 @@ const dumps = require('./utils/HeapSnapshot');
 const express = require('express');
 const socket = require("./servers/websocket");
 const {request_logger} = require("./utils/helper");
+const health = require("./utils/health");
 const assert = require('assert').strict;
 
 const debug = process.env.debug === "1";
@@ -10,7 +11,7 @@ const HOST = process.env.LISTEN_HOST || '0.0.0.0';
 const PORT = process.env.LISTEN_PORT || 9001;
 assert.ok(process.env.ASSIST_KEY, 'The "ASSIST_KEY" environment variable is required');
 const P_KEY = process.env.ASSIST_KEY;
-const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`
+const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`;
 
 const wsapp = express();
 wsapp.use(express.json());
@@ -27,16 +28,9 @@ heapdump && wsapp.use(`${PREFIX}/${P_KEY}/heapdump`, dumps.router);

const wsserver = wsapp.listen(PORT, HOST, () => {
    console.log(`WS App listening on http://${HOST}:${PORT}`);
    console.log('Press Ctrl+C to quit.');
+    health.healthApp.listen(health.PORT, HOST, health.listen_cb);
});

wsapp.enable('trust proxy');
socket.start(wsserver);
+module.exports = {wsserver};
-
-wsapp.get('/private/shutdown', (req, res) => {
-        console.log("Requested shutdown");
-        res.statusCode = 200;
-        res.end("ok!");
-        process.kill(1, "SIGTERM");
-    }
-);
-module.exports = {wsserver};
@ -26,7 +26,7 @@ const debug = process.env.debug === "1";
|
|||
|
||||
const createSocketIOServer = function (server, prefix) {
|
||||
io = _io(server, {
|
||||
maxHttpBufferSize: (parseInt(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
cors: {
|
||||
origin: "*",
|
||||
methods: ["GET", "POST", "PUT"]
|
||||
|
|
@@ -45,7 +45,22 @@ const respond = function (res, data) {
    res.setHeader('Content-Type', 'application/json');
    res.end(JSON.stringify({"data": data}));
}

+const countSessions = async function () {
+    let count = 0;
+    try {
+        const arr = Array.from(io.sockets.adapter.rooms);
+        const filtered = arr.filter(room => !room[1].has(room[0]));
+        for (let i of filtered) {
+            let {projectKey, sessionId} = extractPeerId(i[0]);
+            if (projectKey !== null && sessionId !== null) {
+                count++;
+            }
+        }
+    } catch (e) {
+        console.error(e);
+    }
+    return count;
+}
const socketsList = async function (req, res) {
    debug && console.log("[WS]looking for all available sessions");
    let filters = extractPayloadFromRequest(req);
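For context on the room filter above: Socket.IO automatically puts every connected socket into a room named after its own socket id, so a room only represents a live session when its name is not also one of its member ids. A minimal Python sketch of that idea (rooms and the key-splitting helper are illustrative stand-ins, not the project's code):

# Stand-in for the countSessions filter: skip "personal" rooms whose
# name is one of their own members, count the rest as sessions.
rooms = {
    "sock-1": {"sock-1"},                     # auto-created personal room -> skipped
    "proj123-sess456": {"sock-a", "sock-b"},  # real session room -> counted
}

def count_sessions(rooms: dict[str, set[str]]) -> int:
    count = 0
    for name, members in rooms.items():
        if name in members:  # personal room: name equals a member's socket id
            continue
        project_key, _, session_id = name.partition("-")  # stand-in for extractPeerId
        if project_key and session_id:
            count += 1
    return count

assert count_sessions(rooms) == 1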
@@ -360,6 +375,7 @@ module.exports = {

        socketConnexionTimeout(io);
    },
+    countSessions,
    handlers: {
        socketsList,
        socketsListByProject,

assist/utils/health.js (new file, 54 lines)
@@ -0,0 +1,54 @@
const express = require('express');
const socket = require("../servers/websocket");
const HOST = process.env.LISTEN_HOST || '0.0.0.0';
const PORT = process.env.HEALTH_PORT || 8888;


const {request_logger} = require("./helper");
const debug = process.env.debug === "1";
const respond = function (res, data) {
    res.statusCode = 200;
    res.setHeader('Content-Type', 'application/json');
    res.end(JSON.stringify({"data": data}));
}

const check_health = async function (req, res) {
    debug && console.log("[WS]looking for all available sessions");
    respond(res, {
        "health": true,
        "details": {
            "version": process.env.npm_package_version,
            "connectedSessions": await socket.countSessions()
        }
    });
}


const healthApp = express();
healthApp.use(express.json());
healthApp.use(express.urlencoded({extended: true}));
healthApp.use(request_logger("[healthApp]"));
healthApp.get(['/'], (req, res) => {
        res.statusCode = 200;
        res.end("healthApp ok!");
    }
);
healthApp.get('/health', check_health);
healthApp.get('/shutdown', (req, res) => {
        console.log("Requested shutdown");
        res.statusCode = 200;
        res.end("ok!");
        process.kill(1, "SIGTERM");
    }
);

const listen_cb = async function () {
    console.log(`Health App listening on http://${HOST}:${PORT}`);
    console.log('Press Ctrl+C to quit.');
}

module.exports = {
    healthApp,
    PORT,
    listen_cb
};

ee/api/.gitignore (vendored, 4 changes)
@@ -215,6 +215,7 @@ Pipfile.lock
/chalicelib/core/log_tool_sumologic.py
/chalicelib/core/metadata.py
+/chalicelib/core/mobile.py
/chalicelib/core/sessions.py
/chalicelib/core/sessions_assignments.py
#exp /chalicelib/core/sessions_metas.py
/chalicelib/core/sessions_mobs.py
@@ -264,5 +265,8 @@ Pipfile.lock
/app_alerts.py
/build_alerts.sh
/build_crons.sh
/run-dev.sh
/run-alerts-dev.sh
/routers/subs/health.py
/routers/subs/v1_api.py
#exp /chalicelib/core/dashboards.py
@@ -1,5 +1,6 @@
import logging
import queue
+from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
@@ -10,17 +11,54 @@ from starlette import status
from starlette.responses import StreamingResponse, JSONResponse

from chalicelib.core import traces
+from chalicelib.utils import events_queue
from chalicelib.utils import helper
from chalicelib.utils import pg_client
-from chalicelib.utils import events_queue
from routers import core, core_dynamic, ee, saml
from routers.crons import core_crons
from routers.crons import core_dynamic_crons
from routers.crons import ee_crons
from routers.subs import insights, metrics, v1_api_ee
-from routers.subs import v1_api
+from routers.subs import v1_api, health

-app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # Startup
+    logging.info(">>>>> starting up <<<<<")
+    ap_logger = logging.getLogger('apscheduler')
+    ap_logger.setLevel(loglevel)
+
+    app.schedule = AsyncIOScheduler()
+    app.queue_system = queue.Queue()
+    await pg_client.init()
+    await events_queue.init()
+    app.schedule.start()
+
+    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
+        app.schedule.add_job(id=job["func"].__name__, **job)
+
+    ap_logger.info(">Scheduled jobs:")
+    for job in app.schedule.get_jobs():
+        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+
+    # App listening
+    yield
+
+    # Shutdown
+    logging.info(">>>>> shutting down <<<<<")
+    app.schedule.shutdown(wait=True)
+    await traces.process_traces_queue()
+    await events_queue.terminate()
+    await pg_client.terminate()
+
+
+app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""),
+              lifespan=lifespan)
app.add_middleware(GZipMiddleware, minimum_size=1000)
@@ -68,43 +106,6 @@ app.include_router(metrics.app)
app.include_router(insights.app)
app.include_router(v1_api.app_apikey)
app.include_router(v1_api_ee.app_apikey)
-
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-app.schedule = AsyncIOScheduler()
-app.queue_system = queue.Queue()
-
-
-@app.on_event("startup")
-async def startup():
-    logging.info(">>>>> starting up <<<<<")
-    await pg_client.init()
-    await events_queue.init()
-    app.schedule.start()
-
-    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
-        app.schedule.add_job(id=job["func"].__name__, **job)
-
-    ap_logger.info(">Scheduled jobs:")
-    for job in app.schedule.get_jobs():
-        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
-
-
-@app.on_event("shutdown")
-async def shutdown():
-    logging.info(">>>>> shutting down <<<<<")
-    app.schedule.shutdown(wait=True)
-    await traces.process_traces_queue()
-    await events_queue.terminate()
-    await pg_client.terminate()
-
-
-@app.get('/private/shutdown', tags=["private"])
-async def stop_server():
-    logging.info("Requested shutdown")
-    await shutdown()
-    import os, signal
-    os.kill(1, signal.SIGTERM)
+app.include_router(health.public_app)
+app.include_router(health.app)
+app.include_router(health.app_apikey)
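The two hunks above migrate startup/shutdown logic from the deprecated @app.on_event hooks into a single lifespan context manager. A minimal, self-contained sketch of the pattern (the resource stored on app.state is a placeholder, not OpenReplay's actual clients):

# Everything before `yield` replaces @app.on_event("startup");
# everything after it replaces @app.on_event("shutdown").
from contextlib import asynccontextmanager
from fastapi import FastAPI

@asynccontextmanager
async def lifespan(app: FastAPI):
    app.state.jobs = []   # hypothetical startup work
    yield                 # the application serves requests here
    app.state.jobs.clear()  # shutdown work runs after yield

app = FastAPI(lifespan=lifespan)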
ee/api/chalicelib/core/health.py (new file, 228 lines)
@@ -0,0 +1,228 @@
from urllib.parse import urlparse

import redis
import requests
# from confluent_kafka.admin import AdminClient
from decouple import config

from chalicelib.utils import pg_client, ch_client

if config("LOCAL_DEV", cast=bool, default=False):
    HEALTH_ENDPOINTS = {
        "alerts": "http://127.0.0.1:8888/metrics",
        "assets": "http://127.0.0.1:8888/metrics",
        "assist": "http://127.0.0.1:8888/metrics",
        "chalice": "http://127.0.0.1:8888/metrics",
        "db": "http://127.0.0.1:8888/metrics",
        "ender": "http://127.0.0.1:8888/metrics",
        "heuristics": "http://127.0.0.1:8888/metrics",
        "http": "http://127.0.0.1:8888/metrics",
        "ingress-nginx": "http://127.0.0.1:8888/metrics",
        "integrations": "http://127.0.0.1:8888/metrics",
        "peers": "http://127.0.0.1:8888/metrics",
        "quickwit": "http://127.0.0.1:8888/metrics",
        "sink": "http://127.0.0.1:8888/metrics",
        "sourcemapreader": "http://127.0.0.1:8888/metrics",
        "storage": "http://127.0.0.1:8888/metrics",
        "utilities": "http://127.0.0.1:8888/metrics"
    }

else:
    HEALTH_ENDPOINTS = {
        "alerts": "http://alerts-openreplay.app.svc.cluster.local:8888/health",
        "assets": "http://assets-openreplay.app.svc.cluster.local:8888/metrics",
        "assist": "http://assist-openreplay.app.svc.cluster.local:8888/health",
        "chalice": "http://chalice-openreplay.app.svc.cluster.local:8888/metrics",
        "db": "http://db-openreplay.app.svc.cluster.local:8888/metrics",
        "ender": "http://ender-openreplay.app.svc.cluster.local:8888/metrics",
        "heuristics": "http://heuristics-openreplay.app.svc.cluster.local:8888/metrics",
        "http": "http://http-openreplay.app.svc.cluster.local:8888/metrics",
        "ingress-nginx": "http://ingress-nginx-openreplay.app.svc.cluster.local:8888/metrics",
        "integrations": "http://integrations-openreplay.app.svc.cluster.local:8888/metrics",
        "peers": "http://peers-openreplay.app.svc.cluster.local:8888/health",
        "quickwit": "http://quickwit-openreplay.app.svc.cluster.local:8888/metrics",
        "sink": "http://sink-openreplay.app.svc.cluster.local:8888/metrics",
        "sourcemapreader": "http://sourcemapreader-openreplay.app.svc.cluster.local:8888/health",
        "storage": "http://storage-openreplay.app.svc.cluster.local:8888/metrics",
    }


def __check_database_pg():
    with pg_client.PostgresClient() as cur:
        cur.execute("SHOW server_version;")
        server_version = cur.fetchone()
        cur.execute("SELECT openreplay_version() AS version;")
        schema_version = cur.fetchone()
    return {
        "health": True,
        "details": {
            "version": server_version["server_version"],
            "schema": schema_version["version"]
        }
    }


def __not_supported():
    return {"errors": ["not supported"]}


def __always_healthy():
    return {
        "health": True,
        "details": {}
    }


def __always_healthy_with_version():
    return {
        "health": True,
        "details": {"version": config("version_number", default="unknown")}
    }


def __check_be_service(service_name):
    def fn():
        fail_response = {
            "health": False,
            "details": {
                "errors": ["server health-check failed"]
            }
        }
        try:
            results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
            if results.status_code != 200:
                print(f"!! issue with the {service_name}-health code:{results.status_code}")
                print(results.text)
                fail_response["details"]["errors"].append(results.text)
                return fail_response
        except requests.exceptions.Timeout:
            print(f"!! Timeout getting {service_name}-health")
            fail_response["details"]["errors"].append("timeout")
            return fail_response
        except Exception as e:
            print(f"!! Issue getting {service_name}-health response")
            print(str(e))
            try:
                print(results.text)
                fail_response["details"]["errors"].append(results.text)
            except:
                print("couldn't get response")
            fail_response["details"]["errors"].append(str(e))
            return fail_response
        return {
            "health": True,
            "details": {}
        }

    return fn


def __check_redis():
    fail_response = {
        "health": False,
        "details": {"errors": ["server health-check failed"]}
    }
    if config("REDIS_STRING", default=None) is None:
        fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
        return fail_response

    try:
        u = urlparse(config("REDIS_STRING"))
        r = redis.Redis(host=u.hostname, port=u.port, socket_timeout=2)
        r.ping()
    except Exception as e:
        print("!! Issue getting redis-health response")
        print(str(e))
        fail_response["details"]["errors"].append(str(e))
        return fail_response

    return {
        "health": True,
        "details": {"version": r.execute_command('INFO')['redis_version']}
    }


def get_health():
    health_map = {
        "databases": {
            "postgres": __check_database_pg,
            "clickhouse": __check_database_ch
        },
        "ingestionPipeline": {
            "redis": __check_redis,
            # "kafka": __check_kafka
            "kafka": __always_healthy
        },
        "backendServices": {
            "alerts": __check_be_service("alerts"),
            "assets": __check_be_service("assets"),
            "assist": __check_be_service("assist"),
            "chalice": __always_healthy_with_version,
            "db": __check_be_service("db"),
            "ender": __check_be_service("ender"),
            "frontend": __always_healthy,
            "heuristics": __check_be_service("heuristics"),
            "http": __check_be_service("http"),
            "ingress-nginx": __always_healthy,
            "integrations": __check_be_service("integrations"),
            "peers": __check_be_service("peers"),
            "quickwit": __check_be_service("quickwit"),
            "sink": __check_be_service("sink"),
            "sourcemapreader": __check_be_service("sourcemapreader"),
            "storage": __check_be_service("storage")
        }
    }
    for parent_key in health_map.keys():
        for element_key in health_map[parent_key]:
            health_map[parent_key][element_key] = health_map[parent_key][element_key]()
    return health_map


def __check_database_ch():
    errors = {}
    with ch_client.ClickHouseClient() as ch:
        server_version = ch.execute("SELECT version() AS server_version;")
        schema_version = ch.execute("""SELECT 1
                                       FROM system.functions
                                       WHERE name = 'openreplay_version';""")
        if len(schema_version) > 0:
            schema_version = ch.execute("SELECT openreplay_version() AS version;")
            schema_version = schema_version[0]["version"]
        else:
            schema_version = "unknown"
            errors = {"errors": ["clickhouse schema is outdated"]}
    return {
        "health": True,
        "details": {
            "version": server_version[0]["server_version"],
            "schema": schema_version,
            **errors
        }
    }


# def __check_kafka():
#     fail_response = {
#         "health": False,
#         "details": {"errors": ["server health-check failed"]}
#     }
#     if config("KAFKA_SERVERS", default=None) is None:
#         fail_response["details"]["errors"].append("KAFKA_SERVERS not defined in env-vars")
#         return fail_response
#
#     try:
#         a = AdminClient({'bootstrap.servers': config("KAFKA_SERVERS"), "socket.connection.setup.timeout.ms": 3000})
#         topics = a.list_topics().topics
#         if not topics:
#             raise Exception('topics not found')
#
#     except Exception as e:
#         print("!! Issue getting kafka-health response")
#         print(str(e))
#         fail_response["details"]["errors"].append(str(e))
#         return fail_response
#
#     return {
#         "health": True,
#         "details": {}
#     }
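For orientation, a hedged sketch of the shape get_health() returns once every check callable has been invoked; all values below are illustrative, not real measurements:

# Illustrative return value of get_health(); versions are made up.
example = {
    "databases": {
        "postgres": {"health": True, "details": {"version": "14.5", "schema": "v1.11.0-ee"}},
        "clickhouse": {"health": True, "details": {"version": "22.8", "schema": "v1.11.0-ee"}},
    },
    "ingestionPipeline": {
        "redis": {"health": True, "details": {"version": "7.0.5"}},
        "kafka": {"health": True, "details": {}},
    },
    "backendServices": {
        "alerts": {"health": True, "details": {}},
        # ... one entry per service wired up in get_health()
    },
}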
File diff suppressed because it is too large
@@ -2,11 +2,8 @@ from typing import List, Union

import schemas
import schemas_ee
-from chalicelib.core import events, metadata, events_ios, \
-    sessions_mobs, issues, projects, resources, assist, performance_event, metrics, sessions_devtool, \
-    sessions_notes
-from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper, errors_helper
from chalicelib.utils import sql_helper as sh
+from chalicelib.core import events, metadata, projects, performance_event, metrics
+from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper

SESSION_PROJECTION_COLS_CH = """\
s.project_id,
@@ -51,94 +48,6 @@ SESSION_PROJECTION_COLS_CH_MAP = """\
"""


-def __group_metadata(session, project_metadata):
-    meta = {}
-    for m in project_metadata.keys():
-        if project_metadata[m] is not None and session.get(m) is not None:
-            meta[project_metadata[m]] = session[m]
-        session.pop(m)
-    return meta
-
-
-# This function should not use Clickhouse because it doesn't have `file_key`
-def get_by_id2_pg(project_id, session_id, context: schemas_ee.CurrentContext, full_data=False, include_fav_viewed=False,
-                  group_metadata=False, live=True):
-    with pg_client.PostgresClient() as cur:
-        extra_query = []
-        if include_fav_viewed:
-            extra_query.append("""COALESCE((SELECT TRUE
-                                            FROM public.user_favorite_sessions AS fs
-                                            WHERE s.session_id = fs.session_id
-                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
-            extra_query.append("""COALESCE((SELECT TRUE
-                                            FROM public.user_viewed_sessions AS fs
-                                            WHERE s.session_id = fs.session_id
-                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
-        query = cur.mogrify(
-            f"""\
-            SELECT
-                s.*,
-                s.session_id::text AS session_id,
-                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key,
-                encode(file_key,'hex') AS file_key
-                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
-                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
-            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
-            WHERE s.project_id = %(project_id)s
-                AND s.session_id = %(session_id)s;""",
-            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
-        )
-        # print("===============")
-        # print(query)
-        cur.execute(query=query)
-
-        data = cur.fetchone()
-        if data is not None:
-            data = helper.dict_to_camel_case(data)
-            if full_data:
-                if data["platform"] == 'ios':
-                    data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
-                    for e in data['events']:
-                        if e["type"].endswith("_IOS"):
-                            e["type"] = e["type"][:-len("_IOS")]
-                    data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
-                    data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
-                                                                             session_id=session_id)
-                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
-                else:
-                    data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
-                                                              group_clickrage=True)
-                    all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
-                    data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
-                    # to keep only the first stack
-                    # limit the number of errors to reduce the response-body size
-                    data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
-                                      if e['source'] == "js_exception"][:500]
-                    data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
-                                                                          session_id=session_id)
-                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
-                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
-                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
-                                                                    context=context)
-                    data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
-                                                                    start_ts=data["startTs"],
-                                                                    duration=data["duration"])
-
-                data['notes'] = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=project_id,
-                                                                 session_id=session_id, user_id=context.user_id)
-                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
-                data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
-            data['live'] = live and assist.is_live(project_id=project_id,
-                                                   session_id=session_id,
-                                                   project_key=data["projectKey"])
-            data["inDB"] = True
-            return data
-        elif live:
-            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
-        else:
-            return None


def __get_sql_operator(op: schemas.SearchEventOperator):
    return {
        schemas.SearchEventOperator._is: "=",
@@ -10,13 +10,15 @@ def add_favorite_session(context: schemas_ee.CurrentContext, project_id, session
        cur.execute(
            cur.mogrify(f"""\
                INSERT INTO public.user_favorite_sessions(user_id, session_id)
-               VALUES (%(userId)s,%(sessionId)s);""",
-                        {"userId": context.user_id, "sessionId": session_id})
+               VALUES (%(userId)s,%(session_id)s)
+               RETURNING session_id;""",
+                        {"userId": context.user_id, "session_id": session_id})
        )

-    sessions_favorite_exp.add_favorite_session(project_id=project_id, user_id=context.user_id, session_id=session_id)
-    return sessions.get_by_id2_pg(project_id=project_id, session_id=session_id,
-                                  full_data=False, include_fav_viewed=True, context=context)
+        row = cur.fetchone()
+    if row:
+        sessions_favorite_exp.add_favorite_session(project_id=project_id, user_id=context.user_id, session_id=session_id)
+        return {"data": {"sessionId": session_id}}
+    return {"errors": ["something went wrong"]}


def remove_favorite_session(context: schemas_ee.CurrentContext, project_id, session_id):
@@ -25,12 +27,15 @@ def remove_favorite_session(context: schemas_ee.CurrentContext, project_id, sess
            cur.mogrify(f"""\
                DELETE FROM public.user_favorite_sessions
                WHERE user_id = %(userId)s
-                 AND session_id = %(sessionId)s;""",
-                        {"userId": context.user_id, "sessionId": session_id})
+                 AND session_id = %(session_id)s
+               RETURNING session_id;""",
+                        {"userId": context.user_id, "session_id": session_id})
        )
-    sessions_favorite_exp.remove_favorite_session(project_id=project_id, user_id=context.user_id, session_id=session_id)
-    return sessions.get_by_id2_pg(project_id=project_id, session_id=session_id,
-                                  full_data=False, include_fav_viewed=True, context=context)
+        row = cur.fetchone()
+    if row:
+        sessions_favorite_exp.remove_favorite_session(project_id=project_id, user_id=context.user_id, session_id=session_id)
+        return {"data": {"sessionId": session_id}}
+    return {"errors": ["something went wrong"]}


def favorite_session(context: schemas_ee.CurrentContext, project_id, session_id):
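The rewrite above leans on INSERT/DELETE ... RETURNING so one round trip tells the caller whether a row was actually touched, replacing the old follow-up session fetch. A hedged psycopg2 sketch of the same pattern (DSN and ids are placeholders):

# INSERT ... RETURNING: fetchone() yields the returned row, or None if
# nothing was inserted, mirroring the diff's row/if-row logic.
import psycopg2

conn = psycopg2.connect("dbname=openreplay")  # placeholder DSN
with conn.cursor() as cur:
    cur.execute(
        """INSERT INTO public.user_favorite_sessions(user_id, session_id)
           VALUES (%(user_id)s, %(session_id)s)
           RETURNING session_id;""",
        {"user_id": 1, "session_id": 42},
    )
    row = cur.fetchone()
conn.commit()
print({"data": {"sessionId": 42}} if row else {"errors": ["something went wrong"]})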
ee/api/chalicelib/core/sessions_replay.py (new file, 192 lines)
@@ -0,0 +1,192 @@
import schemas
import schemas_ee
from chalicelib.core import events, metadata, events_ios, \
    sessions_mobs, issues, resources, assist, sessions_devtool, sessions_notes
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper


def __group_metadata(session, project_metadata):
    meta = {}
    for m in project_metadata.keys():
        if project_metadata[m] is not None and session.get(m) is not None:
            meta[project_metadata[m]] = session[m]
        session.pop(m)
    return meta


# for backward compatibility
# This function should not use Clickhouse because it doesn't have `file_key`
def get_by_id2_pg(project_id, session_id, context: schemas_ee.CurrentContext, full_data=False,
                  include_fav_viewed=False, group_metadata=False, live=True):
    with pg_client.PostgresClient() as cur:
        extra_query = []
        if include_fav_viewed:
            extra_query.append("""COALESCE((SELECT TRUE
                                            FROM public.user_favorite_sessions AS fs
                                            WHERE s.session_id = fs.session_id
                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
            extra_query.append("""COALESCE((SELECT TRUE
                                            FROM public.user_viewed_sessions AS fs
                                            WHERE s.session_id = fs.session_id
                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
        query = cur.mogrify(
            f"""\
            SELECT
                s.*,
                s.session_id::text AS session_id,
                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key,
                encode(file_key,'hex') AS file_key
                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
            WHERE s.project_id = %(project_id)s
                AND s.session_id = %(session_id)s;""",
            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
        )
        # print("===============")
        # print(query)
        cur.execute(query=query)

        data = cur.fetchone()
        if data is not None:
            data = helper.dict_to_camel_case(data)
            if full_data:
                if data["platform"] == 'ios':
                    data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
                    for e in data['events']:
                        if e["type"].endswith("_IOS"):
                            e["type"] = e["type"][:-len("_IOS")]
                    data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
                    data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
                                                                             session_id=session_id)
                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
                else:
                    data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
                                                              group_clickrage=True)
                    all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
                    data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
                    # to keep only the first stack
                    # limit the number of errors to reduce the response-body size
                    data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
                                      if e['source'] == "js_exception"][:500]
                    data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
                                                                          session_id=session_id)
                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
                                                                    context=context)
                    data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
                                                                    start_ts=data["startTs"], duration=data["duration"])

                data['notes'] = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=project_id,
                                                                 session_id=session_id, user_id=context.user_id)
                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
                data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
            data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
                                                   project_key=data["projectKey"])
            data["inDB"] = True
            return data
        elif live:
            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
        else:
            return None


# This function should not use Clickhouse because it doesn't have `file_key`
def get_replay(project_id, session_id, context: schemas.CurrentContext, full_data=False, include_fav_viewed=False,
               group_metadata=False, live=True):
    with pg_client.PostgresClient() as cur:
        extra_query = []
        if include_fav_viewed:
            extra_query.append("""COALESCE((SELECT TRUE
                                            FROM public.user_favorite_sessions AS fs
                                            WHERE s.session_id = fs.session_id
                                              AND fs.user_id = %(userId)s), FALSE) AS favorite""")
            extra_query.append("""COALESCE((SELECT TRUE
                                            FROM public.user_viewed_sessions AS fs
                                            WHERE s.session_id = fs.session_id
                                              AND fs.user_id = %(userId)s), FALSE) AS viewed""")
        query = cur.mogrify(
            f"""\
            SELECT
                s.*,
                s.session_id::text AS session_id,
                (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
                {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
                {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
            FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
            WHERE s.project_id = %(project_id)s
                AND s.session_id = %(session_id)s;""",
            {"project_id": project_id, "session_id": session_id, "userId": context.user_id}
        )
        # print("===============")
        # print(query)
        cur.execute(query=query)

        data = cur.fetchone()
        if data is not None:
            data = helper.dict_to_camel_case(data)
            if full_data:
                if data["platform"] == 'ios':
                    data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
                else:
                    data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
                    data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
                    data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
                                                                    context=context)

                data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
            data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
                                                   project_key=data["projectKey"])
            data["inDB"] = True
            return data
        elif live:
            return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
        else:
            return None


def get_events(project_id, session_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT session_id, platform, start_ts, duration
                FROM public.sessions AS s
                WHERE s.project_id = %(project_id)s
                    AND s.session_id = %(session_id)s;""",
            {"project_id": project_id, "session_id": session_id}
        )
        # print("===============")
        # print(query)
        cur.execute(query=query)

        s_data = cur.fetchone()
        if s_data is not None:
            s_data = helper.dict_to_camel_case(s_data)
            data = {}
            if s_data["platform"] == 'ios':
                data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
                for e in data['events']:
                    if e["type"].endswith("_IOS"):
                        e["type"] = e["type"][:-len("_IOS")]
                data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
                data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
                                                                         session_id=session_id)
            else:
                data['events'] = events.get_by_session_id(project_id=project_id, session_id=session_id,
                                                          group_clickrage=True)
                all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
                data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
                # to keep only the first stack
                # limit the number of errors to reduce the response-body size
                data['errors'] = [errors_helper.format_first_stack_frame(e) for e in all_errors
                                  if e['source'] == "js_exception"][:500]
                data['userEvents'] = events.get_customs_by_session_id(project_id=project_id,
                                                                      session_id=session_id)
                data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
                                                                start_ts=s_data["startTs"], duration=s_data["duration"])

            data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
            return data
        else:
            return None
@@ -51,7 +51,7 @@ def get_by_api_key(api_key):
                    WHERE tenants.api_key = %(api_key)s
                      AND tenants.deleted_at ISNULL
                    LIMIT 1;""",
                    {"api_key": api_key})
        cur.execute(query=query)
        return helper.dict_to_camel_case(cur.fetchone())
@@ -94,7 +94,7 @@ def update(tenant_id, user_id, data: schemas.UpdateTenantSchema):
    return edit_client(tenant_id=tenant_id, changes=changes)


-def tenants_exists():
-    with pg_client.PostgresClient() as cur:
+def tenants_exists(use_pool=True):
+    with pg_client.PostgresClient(use_pool=use_pool) as cur:
        cur.execute(f"SELECT EXISTS(SELECT 1 FROM public.tenants)")
        return cur.fetchone()["exists"]
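The new use_pool flag lets callers such as startup and health probes request a fresh connection instead of one from the shared pool. This diff does not show pg_client's internals, so the class below is purely an illustrative sketch of the pattern, built on psycopg2's real pool API:

# Illustrative only: a client that can bypass its connection pool. A direct
# connection proves Postgres is reachable even if the pool is exhausted or
# holds broken connections.
import psycopg2
from psycopg2 import pool

_pool = pool.SimpleConnectionPool(1, 10, dsn="dbname=openreplay")  # placeholder DSN

class PostgresClient:
    def __init__(self, use_pool: bool = True):
        self.use_pool = use_pool

    def __enter__(self):
        self.conn = _pool.getconn() if self.use_pool else psycopg2.connect(dsn="dbname=openreplay")
        return self.conn.cursor()

    def __exit__(self, *exc):
        if self.use_pool:
            _pool.putconn(self.conn)
        else:
            self.conn.close()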
@@ -37,13 +37,16 @@ def get_full_config():
    if __get_secret() is not None:
        for i in range(len(servers)):
            url = servers[i].split(",")[0]
-            servers[i] = {"url": url} if url.lower().startswith("stun") else {"url": url, **credentials}
+            # servers[i] = {"url": url} if url.lower().startswith("stun") else {"url": url, **credentials}
+            servers[i] = {"urls": url} if url.lower().startswith("stun") else {"urls": url, **credentials}
    else:
        for i in range(len(servers)):
            s = servers[i].split(",")
            if len(s) == 3:
-                servers[i] = {"url": s[0], "username": s[1], "credential": s[2]}
+                # servers[i] = {"url": s[0], "username": s[1], "credential": s[2]}
+                servers[i] = {"urls": s[0], "username": s[1], "credential": s[2]}
            else:
-                servers[i] = {"url": s[0]}
+                # servers[i] = {"url": s[0]}
+                servers[i] = {"urls": s[0]}

    return servers
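The key rename above matters because the standard WebRTC RTCIceServer dictionary defines "urls" (a string or list), not "url"; browsers following the spec reject or ignore the singular key. A small self-contained sketch of the corrected shape (hostnames and credentials are placeholders):

# Build RTCIceServer-style dicts from "host[,user,secret]" strings,
# emitting the spec-compliant "urls" key.
def build_ice_servers(raw: list[str]) -> list[dict]:
    servers = []
    for entry in raw:
        parts = entry.split(",")
        if len(parts) == 3:
            servers.append({"urls": parts[0], "username": parts[1], "credential": parts[2]})
        else:
            servers.append({"urls": parts[0]})
    return servers

print(build_ice_servers(["stun:stun.example.com:3478",
                         "turn:turn.example.com:3478,user,secret"]))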
@@ -20,8 +20,9 @@ class ClickHouseClient:

    def __init__(self):
        self.__client = clickhouse_driver.Client(host=config("ch_host"),
-                                                 database=config("ch_database", default="default", cast=str),
-                                                 password=config("ch_password", default="", cast=str),
+                                                 database=config("ch_database", default="default"),
+                                                 user=config("ch_user", default="default"),
+                                                 password=config("ch_password", default=""),
                                                 port=config("ch_port", cast=int),
                                                 settings=settings) \
            if self.__client is None else self.__client
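The hunk above makes the ClickHouse user explicit and drops the redundant cast=str (decouple returns strings by default). A hedged standalone sketch of the same construction; the env-var names follow the diff, while the host/port defaults are placeholders added here for runnability:

# Build a clickhouse_driver client from decouple-managed env vars.
import clickhouse_driver
from decouple import config

client = clickhouse_driver.Client(
    host=config("ch_host", default="localhost"),
    database=config("ch_database", default="default"),
    user=config("ch_user", default="default"),   # newly explicit in the diff
    password=config("ch_password", default=""),
    port=config("ch_port", cast=int, default=9000),
)
print(client.execute("SELECT version()"))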
@@ -35,6 +35,7 @@ rm -rf ./chalicelib/core/log_tool_stackdriver.py
rm -rf ./chalicelib/core/log_tool_sumologic.py
rm -rf ./chalicelib/core/metadata.py
+rm -rf ./chalicelib/core/mobile.py
rm -rf ./chalicelib/core/sessions.py
rm -rf ./chalicelib/core/sessions_assignments.py
#exp rm -rf ./chalicelib/core/sessions_metas.py
rm -rf ./chalicelib/core/sessions_mobs.py
@@ -78,9 +79,12 @@ rm -rf ./Dockerfile_bundle
rm -rf ./entrypoint.bundle.sh
rm -rf ./chalicelib/core/heatmaps.py
rm -rf ./schemas.py
rm -rf ./routers/subs/health.py
rm -rf ./routers/subs/v1_api.py
#exp rm -rf ./chalicelib/core/custom_metrics.py
rm -rf ./chalicelib/core/performance_event.py
rm -rf ./chalicelib/core/saved_search.py
rm -rf ./app_alerts.py
rm -rf ./build_alerts.sh
rm -rf ./run-dev.sh
rm -rf ./run-alerts-dev.sh
@@ -2,4 +2,4 @@
sh env_vars.sh
source /tmp/.env.override

-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload --proxy-headers
+uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --proxy-headers
@@ -2,4 +2,4 @@
export ASSIST_KEY=ignore
sh env_vars.sh
source /tmp/.env.override
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
+uvicorn app:app --host 0.0.0.0 --port 8888
@@ -70,4 +70,7 @@ SESSION_MOB_PATTERN_E=%(sessionId)s/dom.mobe
DEVTOOLS_MOB_PATTERN=%(sessionId)s/devtools.mob
PRESIGNED_URL_EXPIRATION=3600
ASSIST_JWT_EXPIRATION=144000
ASSIST_JWT_SECRET=
+REDIS_STRING=redis://redis-master.db.svc.cluster.local:6379
+KAFKA_SERVERS=kafka.db.svc.cluster.local:9092
+KAFKA_USE_SSL=false
@@ -8,7 +8,7 @@ jira==3.4.1



-fastapi==0.92.0
+fastapi==0.94.1
uvicorn[standard]==0.20.0
python-decouple==3.7
pydantic[email]==1.10.4
@@ -8,7 +8,7 @@ jira==3.4.1



-fastapi==0.92.0
+fastapi==0.95.0
uvicorn[standard]==0.20.0
python-decouple==3.7
pydantic[email]==1.10.4
@@ -17,3 +17,6 @@ apscheduler==3.10.0
clickhouse-driver==0.2.5
python3-saml==1.15.0
python-multipart==0.0.5
+
+redis==4.5.1
+#confluent-kafka==2.0.2
@@ -7,7 +7,7 @@ from starlette.responses import RedirectResponse, FileResponse
import schemas
import schemas_ee
from chalicelib.core import sessions, assist, heatmaps, sessions_favorite, sessions_assignments, errors, errors_viewed, \
-    errors_favorite, sessions_notes, click_maps
+    errors_favorite, sessions_notes, click_maps, sessions_replay
from chalicelib.core import sessions_viewed
from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook
@@ -59,7 +59,8 @@ async def edit_account(data: schemas_ee.EditUserSchema = Body(...),

@app.post('/integrations/slack', tags=['integrations'])
@app.put('/integrations/slack', tags=['integrations'])
-async def add_slack_client(data: schemas.AddCollaborationSchema, context: schemas.CurrentContext = Depends(OR_context)):
+async def add_slack_integration(data: schemas.AddCollaborationSchema,
+                                context: schemas.CurrentContext = Depends(OR_context)):
    n = Slack.add(tenant_id=context.tenant_id, data=data)
    if n is None:
        return {
@@ -155,13 +156,15 @@ async def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
                                               stack_integrations=True, user_id=context.user_id)}


-@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"], dependencies=[OR_scope(Permissions.session_replay)])
+# for backward compatibility
+@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions", "replay"],
+         dependencies=[OR_scope(Permissions.session_replay)])
async def get_session(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
                      context: schemas.CurrentContext = Depends(OR_context)):
    if isinstance(sessionId, str):
        return {"errors": ["session not found"]}
-    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
-                                  include_fav_viewed=True, group_metadata=True, context=context)
+    data = sessions_replay.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
+                                         include_fav_viewed=True, group_metadata=True, context=context)
    if data is None:
        return {"errors": ["session not found"]}
    if data.get("inDB"):
@@ -172,6 +175,39 @@ async def get_session(projectId: int, sessionId: Union[int, str], background_tas
    }


+@app.get('/{projectId}/sessions/{sessionId}/replay', tags=["sessions", "replay"],
+         dependencies=[OR_scope(Permissions.session_replay)])
+async def get_session_replay(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
+                             context: schemas.CurrentContext = Depends(OR_context)):
+    if isinstance(sessionId, str):
+        return {"errors": ["session not found"]}
+    data = sessions_replay.get_replay(project_id=projectId, session_id=sessionId, full_data=True,
+                                      include_fav_viewed=True, group_metadata=True, context=context)
+    if data is None:
+        return {"errors": ["session not found"]}
+    if data.get("inDB"):
+        background_tasks.add_task(sessions_viewed.view_session, project_id=projectId, user_id=context.user_id,
+                                  session_id=sessionId)
+    return {
+        'data': data
+    }
+
+
+@app.get('/{projectId}/sessions/{sessionId}/events', tags=["sessions", "replay"],
+         dependencies=[OR_scope(Permissions.session_replay)])
+async def get_session_events(projectId: int, sessionId: Union[int, str],
+                             context: schemas.CurrentContext = Depends(OR_context)):
+    if isinstance(sessionId, str):
+        return {"errors": ["session not found"]}
+    data = sessions_replay.get_events(project_id=projectId, session_id=sessionId)
+    if data is None:
+        return {"errors": ["session not found"]}
+
+    return {
+        'data': data
+    }
+
+
@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"],
         dependencies=[OR_scope(Permissions.dev_tools)])
async def get_error_trace(projectId: int, sessionId: int, errorId: str,
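The split above lets clients pull the replay payload and the event list independently instead of one large response. A hedged sketch of consuming the new endpoints; the base URL, ids, and auth header are placeholders and the exact scheme depends on the deployment:

# Fetch the replay payload and events separately.
import requests

base = "https://openreplay.example.com/api"          # placeholder
headers = {"Authorization": "Bearer <jwt>"}          # placeholder token

replay = requests.get(f"{base}/1/sessions/123/replay", headers=headers).json()
events = requests.get(f"{base}/1/sessions/123/events", headers=headers).json()
print(replay["data"].get("live"), len(events["data"].get("events", [])))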
@@ -250,8 +286,8 @@ async def get_live_session(projectId: int, sessionId: str, background_tasks: Bac
                           context: schemas_ee.CurrentContext = Depends(OR_context)):
    data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
    if data is None:
-        data = sessions.get_by_id2_pg(context=context, project_id=projectId, session_id=sessionId,
-                                      full_data=True, include_fav_viewed=True, group_metadata=True, live=False)
+        data = sessions_replay.get_replay(context=context, project_id=projectId, session_id=sessionId,
+                                          full_data=True, include_fav_viewed=True, group_metadata=True, live=False)
        if data is None:
            return {"errors": ["session not found"]}
        if data.get("inDB"):
@@ -1,3 +0,0 @@
-#!/bin/zsh
-
-uvicorn app:app --reload
@@ -1,12 +1,12 @@
{
  "name": "assist-server",
- "version": "1.0.0",
+ "version": "v1.11.0-ee",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "assist-server",
-     "version": "1.0.0",
+     "version": "v1.11.0-ee",
      "license": "Elastic License 2.0 (ELv2)",
      "dependencies": {
        "@maxmind/geoip2-node": "^3.5.0",
@@ -38,9 +38,9 @@
      }
    },
    "node_modules/@redis/client": {
-     "version": "1.5.5",
-     "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.5.5.tgz",
-     "integrity": "sha512-fuMnpDYSjT5JXR9rrCW1YWA4L8N/9/uS4ImT3ZEC/hcaQRI1D/9FvwjriRj1UvepIgzZXthFVKMNRzP/LNL7BQ==",
+     "version": "1.5.6",
+     "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.5.6.tgz",
+     "integrity": "sha512-dFD1S6je+A47Lj22jN/upVU2fj4huR7S9APd7/ziUXsIXDL+11GPYti4Suv5y8FuXaN+0ZG4JF+y1houEJ7ToA==",
      "dependencies": {
        "cluster-key-slot": "1.1.2",
        "generic-pool": "3.9.0",
@@ -67,9 +67,9 @@
      }
    },
    "node_modules/@redis/search": {
-     "version": "1.1.1",
-     "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.1.1.tgz",
-     "integrity": "sha512-pqCXTc5e7wJJgUuJiC3hBgfoFRoPxYzwn0BEfKgejTM7M/9zP3IpUcqcjgfp8hF+LoV8rHZzcNTz7V+pEIY7LQ==",
+     "version": "1.1.2",
+     "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.1.2.tgz",
+     "integrity": "sha512-/cMfstG/fOh/SsE+4/BQGeuH/JJloeWuH+qJzM8dbxuWvdWibWAOAHHCZTMPhV3xIlH4/cUEIA8OV5QnYpaVoA==",
      "peerDependencies": {
        "@redis/client": "^1.0.0"
      }
@@ -117,9 +117,9 @@
      }
    },
    "node_modules/@types/node": {
-     "version": "18.14.1",
-     "resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.1.tgz",
-     "integrity": "sha512-QH+37Qds3E0eDlReeboBxfHbX9omAcBCXEzswCu6jySP642jiM3cYSIkU/REqwhCUqXdonHFuBfJDiAJxMNhaQ=="
+     "version": "18.15.1",
+     "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.1.tgz",
+     "integrity": "sha512-U2TWca8AeHSmbpi314QBESRk7oPjSZjDsR+c+H4ECC1l+kFgpZf8Ydhv3SJpPy51VyZHHqxlb6mTTqYNNRVAIw=="
    },
    "node_modules/accepts": {
      "version": "1.3.8",
@@ -878,15 +878,15 @@
      }
    },
    "node_modules/redis": {
-     "version": "4.6.4",
-     "resolved": "https://registry.npmjs.org/redis/-/redis-4.6.4.tgz",
-     "integrity": "sha512-wi2tgDdQ+Q8q+PR5FLRx4QvDiWaA+PoJbrzsyFqlClN5R4LplHqN3scs/aGjE//mbz++W19SgxiEnQ27jnCRaA==",
+     "version": "4.6.5",
+     "resolved": "https://registry.npmjs.org/redis/-/redis-4.6.5.tgz",
+     "integrity": "sha512-O0OWA36gDQbswOdUuAhRL6mTZpHFN525HlgZgDaVNgCJIAZR3ya06NTESb0R+TUZ+BFaDpz6NnnVvoMx9meUFg==",
      "dependencies": {
        "@redis/bloom": "1.2.0",
-       "@redis/client": "1.5.5",
+       "@redis/client": "1.5.6",
        "@redis/graph": "1.1.0",
        "@redis/json": "1.0.4",
-       "@redis/search": "1.1.1",
+       "@redis/search": "1.1.2",
        "@redis/time-series": "1.0.4"
      }
    },
@@ -1085,9 +1085,9 @@
      }
    },
    "node_modules/ua-parser-js": {
-     "version": "1.0.33",
-     "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.33.tgz",
-     "integrity": "sha512-RqshF7TPTE0XLYAqmjlu5cLLuGdKrNu9O1KLA/qp39QtbZwuzwv1dT46DZSopoUMsYgXpB3Cv8a03FI8b74oFQ==",
+     "version": "1.0.34",
+     "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.34.tgz",
+     "integrity": "sha512-K9mwJm/DaB6mRLZfw6q8IMXipcrmuT6yfhYmwhAkuh+81sChuYstYA+znlgaflUPaYUa3odxKPKGw6Vw/lANew==",
    "funding": [
      {
        "type": "opencollective",
@@ -1,6 +1,6 @@
{
  "name": "assist-server",
- "version": "1.0.0",
+ "version": "v1.11.0-ee",
  "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
  "main": "peerjs-server.js",
  "scripts": {
@@ -1,2 +1,2 @@
#!/bin/bash
-rsync -avr --exclude=".*" --exclude="node_modules" --ignore-existing ../../utilities/* ./
+rsync -avr --exclude=".*" --exclude="node_modules" --ignore-existing ../../assist/* ./
@@ -1,6 +1,7 @@
const dumps = require('./utils/HeapSnapshot');
const {request_logger} = require('./utils/helper');
const express = require('express');
+const health = require("./utils/health");
const assert = require('assert').strict;

let socket;
@@ -14,7 +15,7 @@ const HOST = process.env.LISTEN_HOST || '0.0.0.0';
const PORT = process.env.LISTEN_PORT || 9001;
assert.ok(process.env.ASSIST_KEY, 'The "ASSIST_KEY" environment variable is required');
const P_KEY = process.env.ASSIST_KEY;
-const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`
+const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`;

let debug = process.env.debug === "1";
const heapdump = process.env.heapdump === "1";
@ -31,18 +32,11 @@ if (process.env.uws !== "true") {
|
|||
);
|
||||
heapdump && wsapp.use(`${PREFIX}/${P_KEY}/heapdump`, dumps.router);
|
||||
wsapp.use(`${PREFIX}/${P_KEY}`, socket.wsRouter);
|
||||
wsapp.get('/private/shutdown', (req, res) => {
|
||||
console.log("Requested shutdown");
|
||||
res.statusCode = 200;
|
||||
res.end("ok!");
|
||||
process.kill(1, "SIGTERM");
|
||||
}
|
||||
);
|
||||
|
||||
wsapp.enable('trust proxy');
|
||||
const wsserver = wsapp.listen(PORT, HOST, () => {
|
||||
console.log(`WS App listening on http://${HOST}:${PORT}`);
|
||||
console.log('Press Ctrl+C to quit.');
|
||||
health.healthApp.listen(health.PORT, HOST, health.listen_cb);
|
||||
});
|
||||
|
||||
socket.start(wsserver);
|
||||
|
|
@ -102,13 +96,6 @@ if (process.env.uws !== "true") {
|
|||
uapp.post(`${PREFIX}/${P_KEY}/sockets-live/:projectKey`, uWrapper(socket.handlers.socketsLiveByProject));
|
||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey/:sessionId`, uWrapper(socket.handlers.socketsLiveByProject));
|
||||
|
||||
uapp.get('/private/shutdown', (res, req) => {
|
||||
console.log("Requested shutdown");
|
||||
res.writeStatus('200 OK').end("ok!");
|
||||
process.kill(1, "SIGTERM");
|
||||
}
|
||||
);
|
||||
|
||||
socket.start(uapp);
|
||||
|
||||
uapp.listen(HOST, PORT, (token) => {
|
||||
|
|
@ -116,7 +103,7 @@ if (process.env.uws !== "true") {
|
|||
console.warn("port already in use");
|
||||
}
|
||||
console.log(`WS App listening on http://${HOST}:${PORT}`);
|
||||
console.log('Press Ctrl+C to quit.');
|
||||
health.healthApp.listen(health.PORT, HOST, health.listen_cb);
|
||||
});
|
||||
|
||||
|
||||
|
|
@ -34,7 +34,7 @@ const debug = process.env.debug === "1";
|
|||
const createSocketIOServer = function (server, prefix) {
|
||||
if (process.env.uws !== "true") {
|
||||
io = _io(server, {
|
||||
maxHttpBufferSize: (parseInt(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
cors: {
|
||||
origin: "*",
|
||||
methods: ["GET", "POST", "PUT"]
|
||||
|
|
@@ -43,7 +43,7 @@ const createSocketIOServer = function (server, prefix) {
        });
    } else {
        io = new _io.Server({
-            maxHttpBufferSize: (parseInt(process.env.maxHttpBufferSize) || 5) * 1e6,
+            maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
            cors: {
                origin: "*",
                methods: ["GET", "POST", "PUT"]
@@ -83,6 +83,22 @@ const respond = function (res, data) {
    }
}

+const countSessions = async function () {
+    let count = 0;
+    try {
+        let rooms = await io.of('/').adapter.allRooms();
+        for (let i of rooms) {
+            let {projectKey, sessionId} = extractPeerId(i);
+            if (projectKey !== undefined && sessionId !== undefined) {
+                count++;
+            }
+        }
+    } catch (e) {
+        console.error(e);
+    }
+    return count;
+}
+
const socketsList = async function (req, res) {
    debug && console.log("[WS]looking for all available sessions");
    let filters = await extractPayloadFromRequest(req, res);
@@ -417,6 +433,7 @@ module.exports = {
            process.exit(2);
        });
    },
+    countSessions,
    handlers: {
        socketsList,
        socketsListByProject,
@ -29,7 +29,7 @@ const debug = process.env.debug === "1";
|
|||
const createSocketIOServer = function (server, prefix) {
|
||||
if (process.env.uws !== "true") {
|
||||
io = _io(server, {
|
||||
maxHttpBufferSize: (parseInt(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||
cors: {
|
||||
origin: "*",
|
||||
methods: ["GET", "POST", "PUT"]
|
||||
|
|
@@ -38,7 +38,7 @@ const createSocketIOServer = function (server, prefix) {
        });
    } else {
        io = new _io.Server({
-            maxHttpBufferSize: (parseInt(process.env.maxHttpBufferSize) || 5) * 1e6,
+            maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
            cors: {
                origin: "*",
                methods: ["GET", "POST", "PUT"]
@@ -66,6 +66,23 @@ const respond = function (res, data) {
    }
}

+const countSessions = async function () {
+    let count = 0;
+    try {
+        const arr = Array.from(io.sockets.adapter.rooms);
+        const filtered = arr.filter(room => !room[1].has(room[0]));
+        for (let i of filtered) {
+            let {projectKey, sessionId} = extractPeerId(i[0]);
+            if (projectKey !== null && sessionId !== null) {
+                count++;
+            }
+        }
+    } catch (e) {
+        console.error(e);
+    }
+    return count;
+}
+
const socketsList = async function (req, res) {
    debug && console.log("[WS]looking for all available sessions");
    let filters = await extractPayloadFromRequest(req, res);
@@ -379,6 +396,7 @@ module.exports = {

        socketConnexionTimeout(io);
    },
+    countSessions,
    handlers: {
        socketsList,
        socketsListByProject,

ee/assist/utils/health.js (new file, 61 lines)
@@ -0,0 +1,61 @@
const express = require('express');
let socket;
if (process.env.redis === "true") {
    socket = require("../servers/websocket-cluster");
} else {
    socket = require("../servers/websocket");
}
const HOST = process.env.LISTEN_HOST || '0.0.0.0';
const PORT = process.env.HEALTH_PORT || 8888;


const {request_logger} = require("./helper");
const debug = process.env.debug === "1";
const respond = function (res, data) {
    res.statusCode = 200;
    res.setHeader('Content-Type', 'application/json');
    res.end(JSON.stringify({"data": data}));
}

const check_health = async function (req, res) {
    debug && console.log("[WS]looking for all available sessions");
    respond(res, {
        "health": true,
        "details": {
            "version": process.env.npm_package_version,
            "connectedSessions": await socket.countSessions(),
            "uWebSocket": process.env.uws === "true",
            "redis": process.env.redis === "true"
        }
    });
}


const healthApp = express();
healthApp.use(express.json());
healthApp.use(express.urlencoded({extended: true}));
healthApp.use(request_logger("[healthApp]"));
healthApp.get(['/'], (req, res) => {
        res.statusCode = 200;
        res.end("healthApp ok!");
    }
);
healthApp.get('/health', check_health);
healthApp.get('/shutdown', (req, res) => {
        console.log("Requested shutdown");
        res.statusCode = 200;
        res.end("ok!");
        process.kill(1, "SIGTERM");
    }
);

const listen_cb = async function () {
    console.log(`Health App listening on http://${HOST}:${PORT}`);
    console.log('Press Ctrl+C to quit.');
}

module.exports = {
    healthApp,
    PORT,
    listen_cb
};
@@ -0,0 +1,8 @@
CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.11.0-ee';

ALTER TABLE experimental.events
    MODIFY COLUMN issue_type Nullable(Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19,'mouse_thrashing'=20));

ALTER TABLE experimental.issues
    MODIFY COLUMN type Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19,'mouse_thrashing'=20);
@@ -1,3 +1,4 @@
+CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.11.0-ee';
 CREATE DATABASE IF NOT EXISTS experimental;
 
 CREATE TABLE IF NOT EXISTS experimental.autocomplete
@@ -78,7 +79,7 @@ CREATE TABLE IF NOT EXISTS experimental.events
     success Nullable(UInt8),
     request_body Nullable(String),
     response_body Nullable(String),
-    issue_type Nullable(Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19)),
+    issue_type Nullable(Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19,'mouse_thrashing'=20)),
     issue_id Nullable(String),
     error_tags_keys Array(String),
     error_tags_values Array(Nullable(String)),
@@ -200,7 +201,7 @@ CREATE TABLE IF NOT EXISTS experimental.issues
 (
     project_id UInt16,
    issue_id String,
-    type Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19),
+    type Enum8('click_rage'=1,'dead_click'=2,'excessive_scrolling'=3,'bad_request'=4,'missing_resource'=5,'memory'=6,'cpu'=7,'slow_resource'=8,'slow_page_load'=9,'crash'=10,'ml_cpu'=11,'ml_memory'=12,'ml_dead_click'=13,'ml_click_rage'=14,'ml_mouse_thrashing'=15,'ml_excessive_scrolling'=16,'ml_slow_resources'=17,'custom'=18,'js_exception'=19,'mouse_thrashing'=20),
     context_string String,
     context_keys Array(String),
     context_values Array(Nullable(String)),
42  ee/scripts/schema/db/init_dbs/postgresql/1.11.0/1.11.0.sql  Normal file
@@ -0,0 +1,42 @@
+DO
+$$
+    DECLARE
+        previous_version CONSTANT text := 'v1.10.0-ee';
+        next_version CONSTANT text := 'v1.11.0-ee';
+    BEGIN
+        IF (SELECT openreplay_version()) = previous_version THEN
+            raise notice 'valid previous DB version';
+        ELSEIF (SELECT openreplay_version()) = next_version THEN
+            raise notice 'new version detected, nothing to do';
+        ELSE
+            RAISE EXCEPTION 'upgrade to % failed, invalid previous version, expected %, got %', next_version,previous_version,(SELECT openreplay_version());
+        END IF;
+    END ;
+$$
+LANGUAGE plpgsql;
+
+BEGIN;
+CREATE OR REPLACE FUNCTION openreplay_version()
+    RETURNS text AS
+$$
+SELECT 'v1.11.0-ee'
+$$ LANGUAGE sql IMMUTABLE;
+
+ALTER TABLE events.inputs
+    ADD COLUMN duration integer NULL,
+    ADD COLUMN hesitation integer NULL;
+
+ALTER TABLE public.projects
+    ALTER COLUMN gdpr SET DEFAULT '{
+      "maskEmails": true,
+      "sampleRate": 33,
+      "maskNumbers": false,
+      "defaultInputMode": "obscured"
+    }'::jsonb;
+
+ALTER TYPE issue_type ADD VALUE IF NOT EXISTS 'mouse_thrashing';
+
+ALTER TABLE events.clicks
+    ADD COLUMN hesitation integer NULL;
+
+COMMIT;
@@ -253,7 +253,7 @@
       "maskEmails": true,
       "sampleRate": 33,
       "maskNumbers": false,
-      "defaultInputMode": "plain"
+      "defaultInputMode": "obscured"
     }'::jsonb,
     first_recorded_session_at timestamp without time zone NULL DEFAULT NULL,
     sessions_last_check_at timestamp without time zone NULL DEFAULT NULL,
@@ -947,13 +947,14 @@
 
 CREATE TABLE IF NOT EXISTS events.clicks
 (
-    session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
-    message_id bigint NOT NULL,
-    timestamp bigint NOT NULL,
-    label text DEFAULT NULL,
-    url text DEFAULT '' NOT NULL,
+    session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
+    message_id bigint NOT NULL,
+    timestamp bigint NOT NULL,
+    label text DEFAULT NULL,
+    url text DEFAULT '' NOT NULL,
     path text,
-    selector text DEFAULT '' NOT NULL,
+    selector text DEFAULT '' NOT NULL,
+    hesitation integer DEFAULT NULL,
     PRIMARY KEY (session_id, message_id)
 );
 CREATE INDEX IF NOT EXISTS clicks_session_id_idx ON events.clicks (session_id);
@@ -974,8 +975,10 @@
     session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
     message_id bigint NOT NULL,
     timestamp bigint NOT NULL,
-    label text DEFAULT NULL,
-    value text DEFAULT NULL,
+    label text DEFAULT NULL,
+    value text DEFAULT NULL,
+    duration integer DEFAULT NULL,
+    hesitation integer DEFAULT NULL,
     PRIMARY KEY (session_id, message_id)
 );
 CREATE INDEX IF NOT EXISTS inputs_session_id_idx ON events.inputs (session_id);
@@ -41,7 +41,8 @@ function build_api(){
     }
     cp -R ../peers ../${destination}
     cd ../${destination}
-    cp -R ../utilities/utils .
+    cp -R ../assist/utils .
+    cp ../sourcemap-reader/utils/health.js ./utils/.
     # Copy enterprise code
     [[ $1 == "ee" ]] && {
         cp -rf ../ee/peers/* ./
3  peers/clean-dev.sh  Executable file
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+rm -rf ./utils
121  peers/package-lock.json  generated
@@ -1,16 +1,16 @@
 {
   "name": "peers-server",
-  "version": "1.0.0",
+  "version": "v1.11.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "peers-server",
-      "version": "1.0.0",
+      "version": "v1.11.0",
       "license": "Elastic License 2.0 (ELv2)",
       "dependencies": {
         "express": "^4.18.2",
-        "peer": "^v1.0.0-rc.9"
+        "peer": "^v1.0.0"
       }
     },
     "node_modules/@types/body-parser": {
@@ -57,9 +57,9 @@
       "integrity": "sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA=="
     },
     "node_modules/@types/node": {
-      "version": "18.13.0",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz",
-      "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg=="
+      "version": "18.15.5",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.5.tgz",
+      "integrity": "sha512-Ark2WDjjZO7GmvsyFFf81MXuGTA/d6oP38anyxWOL6EREyBKAxKoFHwBhaZxCfLRLpO8JgVXwqOwSwa7jRcjew=="
     },
     "node_modules/@types/qs": {
       "version": "6.9.7",
@@ -72,9 +72,9 @@
       "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw=="
     },
     "node_modules/@types/serve-static": {
-      "version": "1.15.0",
-      "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.0.tgz",
-      "integrity": "sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg==",
+      "version": "1.15.1",
+      "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz",
+      "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==",
       "dependencies": {
         "@types/mime": "*",
         "@types/node": "*"
@@ -243,6 +243,14 @@
         "node": ">= 0.10"
       }
     },
+    "node_modules/data-uri-to-buffer": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz",
+      "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==",
+      "engines": {
+        "node": ">= 12"
+      }
+    },
     "node_modules/debug": {
       "version": "2.6.9",
       "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
@@ -348,6 +356,28 @@
         "node": ">= 0.10.0"
       }
     },
+    "node_modules/fetch-blob": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz",
+      "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/jimmywarting"
+        },
+        {
+          "type": "paypal",
+          "url": "https://paypal.me/jimmywarting"
+        }
+      ],
+      "dependencies": {
+        "node-domexception": "^1.0.0",
+        "web-streams-polyfill": "^3.0.3"
+      },
+      "engines": {
+        "node": "^12.20 || >= 14.13"
+      }
+    },
     "node_modules/finalhandler": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
@@ -365,6 +395,17 @@
         "node": ">= 0.8"
       }
     },
+    "node_modules/formdata-polyfill": {
+      "version": "4.0.10",
+      "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
+      "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==",
+      "dependencies": {
+        "fetch-blob": "^3.1.2"
+      },
+      "engines": {
+        "node": ">=12.20.0"
+      }
+    },
     "node_modules/forwarded": {
       "version": "0.2.0",
      "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
@@ -540,6 +581,41 @@
         "node": ">= 0.6"
       }
     },
+    "node_modules/node-domexception": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+      "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/jimmywarting"
+        },
+        {
+          "type": "github",
+          "url": "https://paypal.me/jimmywarting"
+        }
+      ],
+      "engines": {
+        "node": ">=10.5.0"
+      }
+    },
+    "node_modules/node-fetch": {
+      "version": "3.3.1",
+      "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.1.tgz",
+      "integrity": "sha512-cRVc/kyto/7E5shrWca1Wsea4y6tL9iYJE5FBCius3JQfb/4P4I295PfhgbJQBLTx6lATE4z+wK0rPM4VS2uow==",
+      "dependencies": {
+        "data-uri-to-buffer": "^4.0.0",
+        "fetch-blob": "^3.1.4",
+        "formdata-polyfill": "^4.0.10"
+      },
+      "engines": {
+        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/node-fetch"
+      }
+    },
     "node_modules/object-assign": {
       "version": "4.1.1",
       "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@@ -581,14 +657,15 @@
       "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ=="
     },
     "node_modules/peer": {
-      "version": "1.0.0-rc.9",
-      "resolved": "https://registry.npmjs.org/peer/-/peer-1.0.0-rc.9.tgz",
-      "integrity": "sha512-wjt3fWMKxM/lH/1uD5Qs9qinQ1x/aa9br1eZEQuJ2wuBBQrjAcCT85MUuY9PYcyoW5ymyABsDKC3H/q9KmZ3PA==",
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/peer/-/peer-1.0.0.tgz",
+      "integrity": "sha512-fPVtyCKZWVfjbf7XnY7MskhTlu+pBpMvQV81sngT8aXIuT5YF9y9bwIw8y5BlI98DV0NsDpLjow/oemFNvcKkg==",
       "dependencies": {
         "@types/express": "^4.17.3",
         "@types/ws": "^7.2.3 || ^8.0.0",
         "cors": "^2.8.5",
         "express": "^4.17.1",
+        "node-fetch": "^3.3.0",
         "ws": "^7.2.3 || ^8.0.0",
         "yargs": "^17.6.2"
       },
@@ -819,6 +896,14 @@
         "node": ">= 0.8"
       }
     },
+    "node_modules/web-streams-polyfill": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz",
+      "integrity": "sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==",
+      "engines": {
+        "node": ">= 8"
+      }
+    },
     "node_modules/wrap-ansi": {
       "version": "7.0.0",
       "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
@@ -836,9 +921,9 @@
       }
     },
     "node_modules/ws": {
-      "version": "8.12.1",
-      "resolved": "https://registry.npmjs.org/ws/-/ws-8.12.1.tgz",
-      "integrity": "sha512-1qo+M9Ba+xNhPB+YTWUlK6M17brTut5EXbcBaMRN5pH5dFrXz7lzz1ChFSUq3bOUl8yEvSenhHmYUNJxFzdJew==",
+      "version": "8.13.0",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
+      "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
       "engines": {
         "node": ">=10.0.0"
       },
@@ -864,9 +949,9 @@
       }
     },
     "node_modules/yargs": {
-      "version": "17.6.2",
-      "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.6.2.tgz",
-      "integrity": "sha512-1/9UrdHjDZc0eOU0HxOHoS78C69UD3JRMvzlJ7S79S2nTaWRA/whGCTV8o9e/N/1Va9YIV7Q4sOxD8VV4pCWOw==",
+      "version": "17.7.1",
+      "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz",
+      "integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==",
       "dependencies": {
         "cliui": "^8.0.1",
         "escalade": "^3.1.1",
@@ -1,6 +1,6 @@
 {
   "name": "peers-server",
-  "version": "1.0.0",
+  "version": "v1.11.0",
   "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
   "main": "peerjs-server.js",
   "scripts": {
@@ -19,6 +19,6 @@
   "homepage": "https://github.com/openreplay/openreplay#readme",
   "dependencies": {
     "express": "^4.18.2",
-    "peer": "^v1.0.0-rc.9"
+    "peer": "^v1.0.0"
   }
 }
3  peers/prepare-dev.sh  Executable file
@@ -0,0 +1,3 @@
+#!/bin/bash
+rsync -avr --exclude=".*" --ignore-existing ../assist/utils ./
+cp ../sourcemap-reader/utils/health.js ./utils/.
6  peers/run-dev.sh  Executable file
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -a
+source .env
+set +a
+
+npm start
@@ -1,5 +1,6 @@
 const dumps = require('./utils/HeapSnapshot');
 const {request_logger} = require('./utils/helper');
+const health = require("./utils/health");
 const assert = require('assert').strict;
 const {peerRouter, peerConnection, peerDisconnect, peerError} = require('./servers/peerjs-server');
 const express = require('express');
@@ -44,10 +45,4 @@ process.on('uncaughtException', err => {
     // process.exit(1);
 });
 
-app.get('/private/shutdown', (req, res) => {
-        console.log("Requested shutdown");
-        res.statusCode = 200;
-        res.end("ok!");
-        process.kill(1, "SIGTERM");
-    }
-);
+health.healthApp.listen(health.PORT, HOST, health.listen_cb);
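The hunk above is the pattern this merge repeats across the services: the ad-hoc /private/shutdown route on the main app is dropped, and a second Express listener from utils/health.js takes over health and shutdown duties on its own port. A minimal sketch of that two-listener layout (hypothetical ports and handler, not the repo's exact wiring):

const express = require('express');

const app = express();        // main service traffic
const healthApp = express();  // separate ops-only listener
healthApp.get('/health', (req, res) => res.json({data: {health: true}}));

// Main app on 9000, health app on 8888, both in one process, so probes and
// shutdown never compete with (or get routed like) real client traffic.
app.listen(9000, () => healthApp.listen(8888, () =>
    console.log('main on :9000, health on :8888')));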
@@ -64,6 +64,7 @@ service:
   type: ClusterIP
   ports:
     socketio: 9001
+    metrics: 8888
 
 ingress:
   enabled: true
@@ -64,6 +64,7 @@ service:
   type: ClusterIP
   ports:
     peerjs: 9000
+    metrics: 8888
 
 ingress:
   enabled: true
@@ -48,6 +48,7 @@ service:
   type: ClusterIP
   ports:
     sourcemapreader: 9000
+    metrics: 8888
 
 serviceMonitor:
   enabled: false
42  scripts/schema/db/init_dbs/postgresql/1.11.0/1.11.0.sql  Normal file
@@ -0,0 +1,42 @@
+DO
+$$
+    DECLARE
+        previous_version CONSTANT text := 'v1.10.0';
+        next_version CONSTANT text := 'v1.11.0';
+    BEGIN
+        IF (SELECT openreplay_version()) = previous_version THEN
+            raise notice 'valid previous DB version';
+        ELSEIF (SELECT openreplay_version()) = next_version THEN
+            raise notice 'new version detected, nothing to do';
+        ELSE
+            RAISE EXCEPTION 'upgrade to % failed, invalid previous version, expected %, got %', next_version,previous_version,(SELECT openreplay_version());
+        END IF;
+    END ;
+$$
+LANGUAGE plpgsql;
+
+BEGIN;
+CREATE OR REPLACE FUNCTION openreplay_version()
+    RETURNS text AS
+$$
+SELECT 'v1.11.0'
+$$ LANGUAGE sql IMMUTABLE;
+
+ALTER TABLE events.inputs
+    ADD COLUMN duration integer NULL,
+    ADD COLUMN hesitation integer NULL;
+
+ALTER TABLE public.projects
+    ALTER COLUMN gdpr SET DEFAULT '{
+      "maskEmails": true,
+      "sampleRate": 33,
+      "maskNumbers": false,
+      "defaultInputMode": "obscured"
+    }'::jsonb;
+
+ALTER TYPE issue_type ADD VALUE IF NOT EXISTS 'mouse_thrashing';
+
+ALTER TABLE events.clicks
+    ADD COLUMN hesitation integer NULL;
+
+COMMIT;
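Both the EE and OSS flavors of this migration share the same guard: the leading DO block raises unless openreplay_version() still reports the previous release, so the file is safe against being applied out of order or twice. A hypothetical post-upgrade check with the node-postgres client (the pg dependency and DATABASE_URL are assumptions for illustration, not part of this repo):

const {Client} = require('pg');

(async () => {
    const client = new Client({connectionString: process.env.DATABASE_URL});
    await client.connect();
    // After a successful run this should print 'v1.11.0' ('v1.11.0-ee' on EE).
    const {rows} = await client.query('SELECT openreplay_version() AS v');
    console.log(rows[0].v);
    await client.end();
})();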
@@ -190,7 +190,7 @@
       "maskEmails": true,
       "sampleRate": 33,
       "maskNumbers": false,
-      "defaultInputMode": "plain"
+      "defaultInputMode": "obscured"
     }'::jsonb,
     first_recorded_session_at timestamp without time zone NULL DEFAULT NULL,
     sessions_last_check_at timestamp without time zone NULL DEFAULT NULL,
@@ -628,13 +628,14 @@
 
 CREATE TABLE events.clicks
 (
-    session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
-    message_id bigint NOT NULL,
-    timestamp bigint NOT NULL,
-    label text DEFAULT NULL,
-    url text DEFAULT '' NOT NULL,
+    session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
+    message_id bigint NOT NULL,
+    timestamp bigint NOT NULL,
+    label text DEFAULT NULL,
+    url text DEFAULT '' NOT NULL,
     path text,
-    selector text DEFAULT '' NOT NULL,
+    selector text DEFAULT '' NOT NULL,
+    hesitation integer DEFAULT NULL,
     PRIMARY KEY (session_id, message_id)
 );
 CREATE INDEX clicks_session_id_idx ON events.clicks (session_id);
@@ -654,8 +655,10 @@
     session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE,
     message_id bigint NOT NULL,
     timestamp bigint NOT NULL,
-    label text DEFAULT NULL,
-    value text DEFAULT NULL,
+    label text DEFAULT NULL,
+    value text DEFAULT NULL,
+    duration integer DEFAULT NULL,
+    hesitation integer DEFAULT NULL,
     PRIMARY KEY (session_id, message_id)
 );
 CREATE INDEX inputs_session_id_idx ON events.inputs (session_id);
5  sourcemap-reader/.gitignore  vendored
@@ -3,5 +3,8 @@ node_modules
 npm-debug.log
 .cache
 test.html
-/utils/
+/utils/assistHelper.js
+/utils/geoIP.js
+/utils/HeapSnapshot.js
+/utils/helper.js
 mappings.wasm
@@ -48,7 +48,7 @@ function build_api(){
     }
     cp -R ../sourcemap-reader ../${destination}
     cd ../${destination}
-    cp -R ../utilities/utils .
+    cp -R ../assist/utils .
     tag=""
     # Copy enterprise code
     [[ $1 == "ee" ]] && {
@@ -1,3 +1,6 @@
 #!/bin/bash
 
-rm -rf ./utils
+rm -rf ./utils/assistHelper.js
+rm -rf ./utils/geoIP.js
+rm -rf ./utils/HeapSnapshot.js
+rm -rf ./utils/helper.js
10  sourcemap-reader/package-lock.json  generated
@@ -1,12 +1,12 @@
 {
   "name": "sourcemaps-reader",
-  "version": "1.0.0",
+  "version": "v1.11.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "sourcemaps-reader",
-      "version": "1.0.0",
+      "version": "v1.11.0",
       "license": "Elastic License 2.0 (ELv2)",
       "dependencies": {
         "aws-sdk": "^2.1314.0",
@@ -43,9 +43,9 @@
       }
     },
     "node_modules/aws-sdk": {
-      "version": "2.1314.0",
-      "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1314.0.tgz",
-      "integrity": "sha512-2jsfvgtOQ6kRflaicn50ndME4YoIaBhlus/dZCExtWNXeu8ePh+eAtflsYs6aqIiRPKhCBLaqClzahWm7hC0XA==",
+      "version": "2.1333.0",
+      "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1333.0.tgz",
+      "integrity": "sha512-MvOuleNeRryJtkCGXGEWDHPqqgxuqdi4/hGzJEpn9tnjsW9LNK8UgFPpYzUZ24ZO/3S+jiUh8DMMrL5nVGnagg==",
       "dependencies": {
         "buffer": "4.9.2",
         "events": "1.1.1",
@@ -1,6 +1,6 @@
 {
   "name": "sourcemaps-reader",
-  "version": "1.0.0",
+  "version": "v1.11.0",
   "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
   "main": "peerjs-server.js",
   "scripts": {
@@ -1,2 +1,2 @@
 #!/bin/bash
-rsync -avr --exclude=".*" --ignore-existing ../utilities/utils ./
+rsync -avr --exclude=".*" --ignore-existing ../assist/utils ./
@@ -1,11 +1,12 @@
 const dumps = require('./utils/HeapSnapshot');
 const sourcemapsReaderServer = require('./servers/sourcemaps-server');
 const express = require('express');
+const health = require("./utils/health");
 const {request_logger} = require("./utils/helper");
 
 const HOST = process.env.SMR_HOST || '127.0.0.1';
 const PORT = process.env.SMR_PORT || 9000;
-const PREFIX = process.env.PREFIX || process.env.prefix || ''
+const PREFIX = process.env.PREFIX || process.env.prefix || '';
 const P_KEY = process.env.SMR_KEY || 'smr';
 const heapdump = process.env.heapdump === "1";
 
@@ -21,14 +22,7 @@ heapdump && app.use(`${PREFIX}/${P_KEY}/heapdump`, dumps.router);
 
 const server = app.listen(PORT, HOST, () => {
     console.log(`SR App listening on http://${HOST}:${PORT}`);
     console.log('Press Ctrl+C to quit.');
+    health.healthApp.listen(health.PORT, HOST, health.listen_cb);
 });
-module.exports = {server};
-
-app.get('/private/shutdown', (req, res) => {
-        console.log("Requested shutdown");
-        res.statusCode = 200;
-        res.end("ok!");
-        process.kill(1, "SIGTERM");
-    }
-);
+module.exports = {server};
52  sourcemap-reader/utils/health.js  Normal file
@@ -0,0 +1,52 @@
+const express = require('express');
+const HOST = process.env.LISTEN_HOST || '0.0.0.0';
+const PORT = process.env.HEALTH_PORT || 8888;
+
+
+const {request_logger} = require("./helper");
+const debug = process.env.debug === "1";
+const respond = function (res, data) {
+    res.statusCode = 200;
+    res.setHeader('Content-Type', 'application/json');
+    res.end(JSON.stringify({"data": data}));
+}
+
+const check_health = async function (req, res) {
+    debug && console.log("[WS]looking for all available sessions");
+    respond(res, {
+        "health": true,
+        "details": {
+            "version": process.env.npm_package_version
+        }
+    });
+}
+
+
+const healthApp = express();
+healthApp.use(express.json());
+healthApp.use(express.urlencoded({extended: true}));
+healthApp.use(request_logger("[healthApp]"));
+healthApp.get(['/'], (req, res) => {
+        res.statusCode = 200;
+        res.end("healthApp ok!");
+    }
+);
+healthApp.get('/health', check_health);
+healthApp.get('/shutdown', (req, res) => {
+        console.log("Requested shutdown");
+        res.statusCode = 200;
+        res.end("ok!");
+        process.kill(1, "SIGTERM");
+    }
+);
+
+const listen_cb = async function () {
+    console.log(`Health App listening on http://${HOST}:${PORT}`);
+    console.log('Press Ctrl+C to quit.');
+}
+
+module.exports = {
+    healthApp,
+    PORT,
+    listen_cb
+};