Merge remote-tracking branch 'origin/api-v1.10.0' into dev

commit 81c380645e
Author: Taha Yassine Kraiem
Date:   2023-02-17 15:33:59 +01:00
15 changed files with 296 additions and 169 deletions

.github/workflows/assist-ee.yaml vendored Normal file

@@ -0,0 +1,120 @@
# This action will push the assist changes to AWS
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- "ee/utilities/**"
- "utilities/*/**"
- "!utilities/.gitignore"
- "!utilities/*-dev.sh"
name: Build and Deploy Assist EE
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with the old commit
# to see which workers changed.
fetch-depth: 2
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd utilities
PUSH_IMAGE=0 bash -x ./build.sh ee
if [[ "x$skip_security_checks" == "xtrue" ]]; then
    echo "Skipping Security Checks"
else
    curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
    images=("assist")
    for image in ${images[*]}; do
        ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG || exit $?
    done
fi
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
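# Keep only the charts this job deploys: stash ingress-nginx, chalice and
# quickwit, delete everything else, then restore the stashed three before
# templating.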
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing the -ee flag, because helm will add it.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#

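A note on the trickiest line in the workflow above: the "Creating old image input" step writes one four-line stanza per running image into /tmp/image_override.yaml, and `sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}"` then advances three lines past the `assist:` key (over the `image:` line and the comment) before rewriting that stanza's tag in place. A minimal Python sketch of the same transformation, assuming reconstructed YAML indentation and illustrative tag values:

import textwrap

# Example of what the heredoc step emits (tags are illustrative).
override = textwrap.dedent("""\
    assist:
      image:
        # We have to strip off the -ee, as helm will append it.
        tag: v1.9.0
    chalice:
      image:
        # We have to strip off the -ee, as helm will append it.
        tag: v1.9.0
    """)

def retag(yaml_text: str, service: str, new_tag: str) -> str:
    """Mimic sed's /assist/{n;n;n;s/.../}: find the service key, skip
    the image: and comment lines, and rewrite the tag line."""
    lines = yaml_text.splitlines()
    for i, line in enumerate(lines):
        if line == f"{service}:":
            lines[i + 3] = f"    tag: {new_tag}"  # n;n;n lands here
    return "\n".join(lines) + "\n"

# Only the assist stanza changes; chalice keeps its old tag.
print(retag(override, "assist", "dev_0123abcd"))

If the heredoc ever gains or loses a line, the sed offset (and `i + 3` here) has to move with it.
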
.github/workflows/assist.yaml vendored Normal file

@@ -0,0 +1,120 @@
# This action will push the assist changes to AWS
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- "ee/utilities/**"
- "utilities/*/**"
- "!utilities/.gitignore"
- "!utilities/*-dev.sh"
name: Build and Deploy Assist
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with the old commit
# to see which workers changed.
fetch-depth: 2
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd utilities
PUSH_IMAGE=0 bash -x ./build.sh ee
if [[ "x$skip_security_checks" == "xtrue" ]]; then
    echo "Skipping Security Checks"
else
    curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
    images=("assist")
    for image in ${images[*]}; do
        ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG || exit $?
    done
fi
images=("assist")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# We're not passing the -ee flag, because helm will add it.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#


@@ -1,69 +0,0 @@
# This action will push the assist changes to aws
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- "ee/utilities/**"
- "utilities/*/**"
- "!utilities/.gitignore"
- "!utilities/*-dev.sh"
name: Build and Deploy Assist EE
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
cd utilities
PUSH_IMAGE=1 bash build.sh ee
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${EE_KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app utilities
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#


@@ -1,68 +0,0 @@
# This action will push the assist changes to aws
on:
workflow_dispatch:
push:
branches:
- api-v1.10.0
paths:
- "utilities/**"
- "!utilities/.gitignore"
- "!utilities/*-dev.sh"
name: Build and Deploy Assist
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
cd utilities
PUSH_IMAGE=1 bash build.sh
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app utilities
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#


@@ -53,3 +53,10 @@ async def stop_server():
await shutdown()
import os, signal
os.kill(1, signal.SIGTERM)
if config("LOCAL_DEV", default=False, cast=bool):
@app.get('/private/trigger', tags=["private"])
async def trigger_main_cron():
logging.info("Triggering main cron")
alerts_processor.process()

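The hunk above adds a developer-only escape hatch: when LOCAL_DEV is set, a private GET route triggers the alerts cron on demand instead of waiting for the scheduler. A hypothetical local smoke test, assuming uvicorn's default port (the matching run command ships as api/run-alerts-dev.sh further down):

# Assumes the alerts app was started locally with LOCAL_DEV enabled,
# e.g. LOCAL_DEV=true uvicorn app_alerts:app --reload
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:8000/private/trigger") as resp:
    print(resp.status, resp.read().decode())
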

@@ -49,10 +49,12 @@ LeftToDb = {
schemas.AlertColumn.errors__4xx_5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
"condition": "status/100!=2"},
schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.errors__4xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.errors__5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
@@ -95,7 +97,7 @@ def can_check(a) -> bool:
a["options"].get("lastNotification") is None or
a["options"]["lastNotification"] <= 0 or
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
def Build(a):
@@ -119,7 +121,7 @@ def Build(a):
subQ = f"""SELECT {colDef["formula"]} AS value
FROM {colDef["table"]}
WHERE project_id = %(project_id)s
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
j_s = colDef.get("joinSessions", True)
main_table = colDef["table"]
is_ss = main_table == "public.sessions"
@@ -142,8 +144,7 @@ def Build(a):
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
AND timestamp<=%(now)s
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
@@ -206,7 +207,7 @@ def process():
cur = cur.recreate(rollback=True)
if len(notifications) > 0:
cur.execute(
cur.mogrify(f"""UPDATE public.Alerts
cur.mogrify(f"""UPDATE public.alerts
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:

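The `can_check` hunk earlier in this diff is an indentation-only change, but the condition it touches is the heart of alert scheduling: an alert may fire only when the renotify interval since `lastNotification` has elapsed and the current time falls inside a one-minute window that recurs every `TimeInterval[repetitionBase]` minutes after `createdAt` (the modulo term). A worked restatement of just that window, under illustrative names:

MINUTE_MS = 60 * 1000

def in_repetition_window(now_ms: int, created_at_ms: int, interval_min: int) -> bool:
    """True only during the first minute of each interval after creation."""
    return ((now_ms - created_at_ms) % (interval_min * MINUTE_MS)) < MINUTE_MS

# With a 30-minute base, minute 30 is eligible but minute 31 is not.
assert in_repetition_window(30 * MINUTE_MS, 0, 30)
assert not in_repetition_window(31 * MINUTE_MS, 0, 30)
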
api/run-alerts-dev.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/zsh
uvicorn app_alerts:app --reload


@@ -6,41 +6,41 @@ logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
from . import sessions as sessions_legacy
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
print(">>> Using experimental sessions search")
logging.info(">>> Using experimental sessions search")
from . import sessions_exp as sessions
else:
from . import sessions as sessions
if config("EXP_AUTOCOMPLETE", cast=bool, default=False):
print(">>> Using experimental autocomplete")
logging.info(">>> Using experimental autocomplete")
from . import autocomplete_exp as autocomplete
else:
from . import autocomplete as autocomplete
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
print(">>> Using experimental error search")
logging.info(">>> Using experimental error search")
from . import errors as errors_legacy
from . import errors_exp as errors
if config("EXP_ERRORS_GET", cast=bool, default=False):
print(">>> Using experimental error get")
logging.info(">>> Using experimental error get")
else:
from . import errors as errors
if config("EXP_METRICS", cast=bool, default=False):
print(">>> Using experimental metrics")
logging.info(">>> Using experimental metrics")
from . import metrics_exp as metrics
else:
from . import metrics as metrics
if config("EXP_ALERTS", cast=bool, default=False):
print(">>> Using experimental alerts")
logging.info(">>> Using experimental alerts")
from . import alerts_processor_exp as alerts_processor
else:
from . import alerts_processor as alerts_processor
if config("EXP_FUNNELS", cast=bool, default=False):
print(">>> Using experimental funnels")
logging.info(">>> Using experimental funnels")
if not config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
from . import sessions as sessions_legacy
@@ -49,4 +49,4 @@ else:
from . import significance as significance
if config("EXP_RESOURCES", cast=bool, default=False):
print(">>> Using experimental resources for session-replay")
logging.info(">>> Using experimental resources for session-replay")


@@ -54,10 +54,12 @@ LeftToDb = {
schemas.AlertColumn.errors__4xx_5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
"condition": "status/100!=2"},
schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.errors__4xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.errors__5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
@@ -100,7 +102,7 @@ def can_check(a) -> bool:
a["options"].get("lastNotification") is None or
a["options"]["lastNotification"] <= 0 or
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
def Build(a):
@@ -124,7 +126,7 @@ def Build(a):
subQ = f"""SELECT {colDef["formula"]} AS value
FROM {colDef["table"]}
WHERE project_id = %(project_id)s
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
j_s = colDef.get("joinSessions", True)
main_table = colDef["table"]
is_ss = main_table == "public.sessions"
@@ -211,7 +213,7 @@ def process():
cur = cur.recreate(rollback=True)
if len(notifications) > 0:
cur.execute(
cur.mogrify(f"""UPDATE public.Alerts
cur.mogrify(f"""UPDATE public.alerts
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:


@@ -135,7 +135,7 @@ def Build(a):
FROM {colDef["table"](now)}
WHERE project_id = %(project_id)s
{"AND event_type=%(event_type)s" if params["event_type"] else ""}
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
{"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
@@ -198,9 +198,14 @@ def process():
if alert["query"]["left"] != "CUSTOM":
continue
if alerts_processor.can_check(alert):
logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
query, params = Build(alert)
query = ch_cur.format(query, params)
try:
query = ch_cur.format(query, params)
except Exception as e:
logging.error(
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
logging.error(e)
continue
logging.debug(alert)
logging.debug(query)
try:


@@ -15,5 +15,4 @@ servers/sourcemaps-server.js
/utils/helper.js
/utils/assistHelper.js
.local
run-dev.sh
*.mmdb

ee/utilities/run-dev.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
set -a
source .env
set +a
npm start

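run-dev.sh leans on `set -a`, which marks every variable assigned while `.env` is sourced for export, so `npm start` inherits the whole file; `set +a` then restores normal behavior. A rough Python equivalent of those three lines (the parsing is simplified; real .env values may be quoted or reference other variables):

import os
import subprocess

env = dict(os.environ)
with open(".env") as f:                      # `source .env` under `set -a`
    for line in f:
        line = line.strip()
        if line and not line.startswith("#") and "=" in line:
            key, _, value = line.partition("=")
            env[key.strip()] = value.strip()

subprocess.run(["npm", "start"], env=env, check=True)   # `npm start`
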

@@ -24,7 +24,7 @@ const {
const {createAdapter} = require("@socket.io/redis-adapter");
const {createClient} = require("redis");
const wsRouter = express.Router();
const REDIS_URL = process.env.REDIS_URL || "redis://localhost:6379";
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
const pubClient = createClient({url: REDIS_URL});
const subClient = pubClient.duplicate();
console.log(`Using Redis: ${REDIS_URL}`);
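
The rewritten REDIS_URL line makes the scheme optional: the regex matches an existing `scheme://` prefix, a bare leading `//`, or the empty string at the start of the value, and replaces whichever it finds with `redis://`, so plain `host:port` settings keep working and well-formed URLs pass through unchanged. The same pattern transliterated to Python, with a few illustrative inputs:

import re

_SCHEME = re.compile(r"((^\w+:|^)//|^)")

def normalize_redis_url(raw: str) -> str:
    # Replace only the first match, like String.replace with a non-global regex.
    return _SCHEME.sub("redis://", raw, count=1)

assert normalize_redis_url("localhost:6379") == "redis://localhost:6379"
assert normalize_redis_url("redis://localhost:6379") == "redis://localhost:6379"
assert normalize_redis_url("//localhost:6379") == "redis://localhost:6379"
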
@@ -309,7 +309,8 @@ module.exports = {
debug && console.log(`notifying new agent about no SESSIONS`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
}
await io.of('/').adapter.remoteJoin(socket.id, socket.peerId);
// await io.of('/').adapter.join(socket.id, socket.peerId);
await socket.join(socket.peerId);
let rooms = await io.of('/').adapter.allRooms();
if (rooms.has(socket.peerId)) {
let connectedSockets = await io.in(socket.peerId).fetchSockets();


@@ -287,7 +287,7 @@ module.exports = {
debug && console.log(`notifying new agent about no SESSIONS`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
}
socket.join(socket.peerId);
await socket.join(socket.peerId);
if (io.sockets.adapter.rooms.get(socket.peerId)) {
debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`);
}


@@ -268,7 +268,7 @@ module.exports = {
debug && console.log(`notifying new agent about no SESSIONS`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
}
socket.join(socket.peerId);
await socket.join(socket.peerId);
if (io.sockets.adapter.rooms.get(socket.peerId)) {
debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`);
}