Merge pull request #706 from openreplay/dev

draft: release 1.8.0
Mehdi Osman 2022-09-02 12:34:45 +02:00 committed by GitHub
commit a0fc1672c8
845 changed files with 35234 additions and 16610 deletions

@@ -84,9 +84,9 @@ jobs:
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice} /tmp
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice} openreplay/charts/
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}

@@ -78,9 +78,9 @@ jobs:
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend} /tmp
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend} openreplay/charts/
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
@@ -123,9 +123,9 @@ jobs:
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend} /tmp
mv openreplay/charts/{ingress-nginx,frontend,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend} openreplay/charts/
mv /tmp/{ingress-nginx,frontend,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

@@ -117,6 +117,7 @@ jobs:
echo > /tmp/image_override.yaml
mkdir /tmp/helmcharts
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
mv openreplay/charts/quickwit /tmp/helmcharts/
## Update images
for image in $(cat /tmp/images_to_build.txt);
do

@@ -114,6 +114,7 @@ jobs:
echo > /tmp/image_override.yaml
mkdir /tmp/helmcharts
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
mv openreplay/charts/quickwit /tmp/helmcharts/
## Update images
for image in $(cat /tmp/images_to_build.txt);
do

api/.gitignore

@@ -174,4 +174,6 @@ logs*.txt
SUBNETS.json
./chalicelib/.configs
README/*
README/*
.local
build_crons.sh

@@ -1,7 +1,6 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache build-base nodejs npm tini
ARG envarg
# Add Tini

@@ -1,7 +1,6 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=alerts \

@@ -4,6 +4,7 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from starlette.responses import StreamingResponse
from chalicelib.utils import helper
@@ -14,7 +15,7 @@ from routers.crons import core_dynamic_crons
from routers.subs import dashboard, insights, metrics, v1_api
app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
app.add_middleware(GZipMiddleware, minimum_size=1000)
@app.middleware('http')
async def or_middleware(request: Request, call_next):
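
For reference, a minimal sketch of the middleware wiring this hunk adds. GZipMiddleware ships with Starlette and is re-exported by FastAPI; minimum_size is in bytes, so bodies under 1000 bytes are sent uncompressed:

    from fastapi import FastAPI
    from fastapi.middleware.gzip import GZipMiddleware

    app = FastAPI()
    # Compress responses larger than ~1 KB when the client sends
    # Accept-Encoding: gzip; smaller bodies pass through untouched.
    app.add_middleware(GZipMiddleware, minimum_size=1000)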

@@ -15,10 +15,12 @@ class ProjectAuthorizer:
if len(request.path_params.keys()) == 0 or request.path_params.get(self.project_identifier) is None:
return
current_user: schemas.CurrentContext = await OR_context(request)
project_identifier = request.path_params[self.project_identifier]
value = request.path_params[self.project_identifier]
if (self.project_identifier == "projectId" \
and projects.get_project(project_id=project_identifier, tenant_id=current_user.tenant_id) is None) \
or (self.project_identifier.lower() == "projectKey" \
and projects.get_internal_project_id(project_key=project_identifier) is None):
and not (isinstance(value, int) or isinstance(value, str) and value.isnumeric())
and projects.get_project(project_id=value, tenant_id=current_user.tenant_id) is None) \
or (self.project_identifier == "projectKey" \
and projects.get_internal_project_id(project_key=value) is None):
print("project not found")
print(value)
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
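
Since `and` binds tighter than `or` in Python, the added guard parses as sketched below (an illustrative restatement, not part of the commit):

    # not (isinstance(value, int) or isinstance(value, str) and value.isnumeric())
    # is evaluated by Python as:
    # not (isinstance(value, int) or (isinstance(value, str) and value.isnumeric()))
    def is_numeric_identifier(value) -> bool:
        # True for ints and purely numeric strings such as "42"
        return isinstance(value, int) or (isinstance(value, str) and value.isnumeric())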

@@ -18,6 +18,8 @@ check_prereq() {
}
function build_api(){
cp -R ../api ../_api
cd ../_api
cp -R ../utilities/utils ../sourcemap-reader/.
cp -R ../sourcemap-reader .
tag=""
@@ -28,6 +30,8 @@ function build_api(){
tag="ee-"
}
docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/chalice:${git_sha1} .
cd ../api
rm -rf ../_api
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
@@ -39,9 +43,9 @@ function build_api(){
check_prereq
build_api $1
echo build_complete
source build_alerts.sh $1
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1
[[ $1 == "ee" ]] && {
cp ../ee/api/build_crons.sh .
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_crons.sh $1
}
echo "api done"
}

@@ -17,6 +17,8 @@ check_prereq() {
}
function build_api(){
cp -R ../api ../_alerts
cd ../_alerts
tag=""
# Copy enterprise code
[[ $1 == "ee" ]] && {
@@ -24,15 +26,15 @@ function build_api(){
envarg="default-ee"
tag="ee-"
}
cp -R ../api ../_alerts
docker build -f ../_alerts/Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
cd ../api
rm -rf ../_alerts
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
}
echo "completed alerts build"
echo "completed alerts build"
}
check_prereq

@@ -52,8 +52,8 @@ def create(project_id, data: schemas.AlertSchema):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id)
VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s)
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id, change)
VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s, %(change)s)
RETURNING *;""",
{"project_id": project_id, **data})
)
@@ -75,7 +75,8 @@ def update(id, data: schemas.AlertSchema):
detection_method = %(detection_method)s,
query = %(query)s,
options = %(options)s,
series_id = %(series_id)s
series_id = %(series_id)s,
change = %(change)s
WHERE alert_id =%(id)s AND deleted_at ISNULL
RETURNING *;""",
{"id": id, **data})

@@ -12,7 +12,8 @@ def get_all_alerts():
(EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
alerts.name,
alerts.series_id,
filter
filter,
change
FROM public.alerts
LEFT JOIN metric_series USING (series_id)
INNER JOIN projects USING (project_id)

@@ -1,12 +1,16 @@
import decimal
import logging
from decouple import config
import schemas
from chalicelib.core import alerts_listener
from chalicelib.core import sessions, alerts
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
LeftToDb = {
schemas.AlertColumn.performance__dom_content_loaded__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
@@ -41,7 +45,7 @@ LeftToDb = {
"formula": "AVG(NULLIF(resources.duration,0))"},
schemas.AlertColumn.resources__missing__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE"},
"formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE AND type='img'"},
schemas.AlertColumn.errors__4xx_5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
"condition": "status/100!=2"},
@@ -53,8 +57,9 @@ LeftToDb = {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
schemas.AlertColumn.performance__crashes__count: {
"table": "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions",
"formula": "COUNT(DISTINCT session_id)", "condition": "errors_count > 0"},
"table": "public.sessions",
"formula": "COUNT(DISTINCT session_id)",
"condition": "errors_count > 0 AND duration>0"},
schemas.AlertColumn.errors__javascript__count: {
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
@@ -94,7 +99,8 @@ def can_check(a) -> bool:
def Build(a):
params = {"project_id": a["projectId"]}
now = TimeUTC.now()
params = {"project_id": a["projectId"], "now": now}
full_args = {}
j_s = True
if a["seriesId"] is not None:
@@ -121,11 +127,12 @@ def Build(a):
if a["seriesId"] is not None:
q += f""" FROM ({subQ}) AS stat"""
else:
q += f""" FROM ({subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}) AS stat"""
q += f""" FROM ({subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}) AS stat"""
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
else:
if a["options"]["change"] == schemas.AlertDetectionChangeType.change:
if a["change"] == schemas.AlertDetectionType.change:
if a["seriesId"] is not None:
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
@@ -135,7 +142,9 @@ def Build(a):
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}"""
AND datetime<=toDateTime(%(now)s/1000)
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
@@ -155,8 +164,9 @@ def Build(a):
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
* 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}"""
sub1 = f"""{subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
@@ -185,30 +195,7 @@ def process():
result = cur.fetchone()
if result["valid"]:
logging.info("Valid alert, notifying users")
notifications.append({
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
"options": {"source": "ALERT", "sourceId": alert["alertId"],
"sourceMeta": alert["detectionMethod"],
"message": alert["options"]["message"], "projectId": alert["projectId"],
"data": {"title": alert["name"],
"limitValue": alert["query"]["right"],
"actualValue": float(result["value"]) \
if isinstance(result["value"], decimal.Decimal) \
else result["value"],
"operator": alert["query"]["operator"],
"trigger": alert["query"]["left"],
"alertId": alert["alertId"],
"detectionMethod": alert["detectionMethod"],
"currentPeriod": alert["options"]["currentPeriod"],
"previousPeriod": alert["options"]["previousPeriod"],
"createdAt": TimeUTC.now()}},
})
notifications.append(generate_notification(alert, result))
except Exception as e:
logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
logging.error(str(e))
@@ -220,3 +207,30 @@ def process():
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:
alerts.process_notifications(notifications)
def generate_notification(alert, result):
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
"options": {"source": "ALERT", "sourceId": alert["alertId"],
"sourceMeta": alert["detectionMethod"],
"message": alert["options"]["message"], "projectId": alert["projectId"],
"data": {"title": alert["name"],
"limitValue": alert["query"]["right"],
"actualValue": float(result["value"]) \
if isinstance(result["value"], decimal.Decimal) \
else result["value"],
"operator": alert["query"]["operator"],
"trigger": alert["query"]["left"],
"alertId": alert["alertId"],
"detectionMethod": alert["detectionMethod"],
"currentPeriod": alert["options"]["currentPeriod"],
"previousPeriod": alert["options"]["previousPeriod"],
"createdAt": TimeUTC.now()}},
}
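
The repeated %(now)s additions pin the upper bound of each aggregation window to the moment the check runs; the window arithmetic, in isolation (currentPeriod is in minutes):

    from chalicelib.utils.TimeUTC import TimeUTC

    now = TimeUTC.now()                # epoch milliseconds
    current_period = 15                # illustrative value, minutes
    start_date = now - current_period * 60 * 1000
    # Queries now keep rows with start_date <= timestamp <= now,
    # instead of the previous open-ended timestamp >= start_date.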

@@ -1,8 +1,10 @@
import requests
from decouple import config
from os.path import exists
import schemas
from chalicelib.core import projects
from starlette.exceptions import HTTPException
from os import access, R_OK
SESSION_PROJECTION_COLS = """s.project_id,
s.session_id::text AS session_id,
@@ -161,3 +163,23 @@ def autocomplete(project_id, q: str, key: str = None):
def get_ice_servers():
return config("iceServers") if config("iceServers", default=None) is not None \
and len(config("iceServers")) > 0 else None
def get_raw_mob_by_id(project_id, session_id):
efs_path = config("FS_DIR")
if not exists(efs_path):
raise HTTPException(400, f"EFS not found in path: {efs_path}")
if not access(efs_path, R_OK):
raise HTTPException(400, f"EFS found under: {efs_path}; but it is not readable, please check permissions")
path_to_file = efs_path + "/" + str(session_id)
if exists(path_to_file):
if not access(path_to_file, R_OK):
raise HTTPException(400, f"Replay file found under: {efs_path};"
f" but it is not readable, please check permissions")
return path_to_file
return None
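
A hedged usage sketch for the new helper; the route shape and FileResponse wiring are assumptions, not part of this diff:

    from fastapi import HTTPException
    from starlette.responses import FileResponse

    def download_replay(project_id: int, session_id: int):
        path = get_raw_mob_by_id(project_id=project_id, session_id=session_id)
        if path is None:
            raise HTTPException(status_code=404, detail="replay file not found")
        return FileResponse(path, media_type="application/octet-stream")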

@@ -0,0 +1,132 @@
import schemas
from chalicelib.core import countries
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.event_filter_definition import Event
TABLE = "public.autocomplete"
def __get_autocomplete_table(value, project_id):
autocomplete_events = [schemas.FilterType.rev_id,
schemas.EventType.click,
schemas.FilterType.user_device,
schemas.FilterType.user_id,
schemas.FilterType.user_browser,
schemas.FilterType.user_os,
schemas.EventType.custom,
schemas.FilterType.user_country,
schemas.EventType.location,
schemas.EventType.input]
autocomplete_events.sort()
sub_queries = []
c_list = []
for e in autocomplete_events:
if e == schemas.FilterType.user_country:
c_list = countries.get_country_code_autocomplete(value)
if len(c_list) > 0:
sub_queries.append(f"""(SELECT DISTINCT ON(value) type, value
FROM {TABLE}
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value IN %(c_list)s)""")
continue
sub_queries.append(f"""(SELECT type, value
FROM {TABLE}
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(svalue)s
LIMIT 5)""")
if len(value) > 2:
sub_queries.append(f"""(SELECT type, value
FROM {TABLE}
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(value)s
LIMIT 5)""")
with pg_client.PostgresClient() as cur:
query = cur.mogrify(" UNION DISTINCT ".join(sub_queries) + ";",
{"project_id": project_id,
"value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value),
"c_list": tuple(c_list)
})
try:
print(query)
cur.execute(query)
except Exception as err:
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(query.decode('UTF-8'))
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
results = helper.list_to_camel_case(cur.fetchall())
return results
def __generic_query(typename, value_length=None):
if typename == schemas.FilterType.user_country:
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value IN %(value)s
ORDER BY value"""
if value_length is None or value_length > 2:
return f"""(SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 5)
UNION DISTINCT
(SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5);"""
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 10;"""
def __generic_autocomplete(event: Event):
def f(project_id, value, key=None, source=None):
with pg_client.PostgresClient() as cur:
query = __generic_query(event.ui_type, value_length=len(value))
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}
cur.execute(cur.mogrify(query, params))
return helper.list_to_camel_case(cur.fetchall())
return f
def __generic_autocomplete_metas(typename):
def f(project_id, text):
with pg_client.PostgresClient() as cur:
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
"svalue": helper.string_to_sql_like("^" + text)}
if typename == schemas.FilterType.user_country:
params["value"] = tuple(countries.get_country_code_autocomplete(text))
query = cur.mogrify(__generic_query(typename, value_length=len(text)), params)
cur.execute(query)
rows = cur.fetchall()
return rows
return f
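
How __generic_query degrades for short inputs, assuming helper.string_to_sql_like builds the ILIKE pattern and the "^"-prefixed variant anchors a prefix match:

    # 1-2 characters: a single prefix-only query, LIMIT 10
    q_short = __generic_query(schemas.EventType.click, value_length=2)
    # 3+ characters: prefix query UNION DISTINCT substring query, LIMIT 5 each
    q_long = __generic_query(schemas.EventType.click, value_length=5)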

@@ -0,0 +1,295 @@
COUNTRIES = {
"AC": "Ascension Island",
"AD": "Andorra",
"AE": "United Arab Emirates",
"AF": "Afghanistan",
"AG": "Antigua And Barbuda",
"AI": "Anguilla",
"AL": "Albania",
"AM": "Armenia",
"AN": "Netherlands Antilles",
"AO": "Angola",
"AQ": "Antarctica",
"AR": "Argentina",
"AS": "American Samoa",
"AT": "Austria",
"AU": "Australia",
"AW": "Aruba",
"AX": "Åland Islands",
"AZ": "Azerbaijan",
"BA": "Bosnia & Herzegovina",
"BB": "Barbados",
"BD": "Bangladesh",
"BE": "Belgium",
"BF": "Burkina Faso",
"BG": "Bulgaria",
"BH": "Bahrain",
"BI": "Burundi",
"BJ": "Benin",
"BL": "Saint Barthélemy",
"BM": "Bermuda",
"BN": "Brunei Darussalam",
"BO": "Bolivia",
"BQ": "Bonaire, Saint Eustatius And Saba",
"BR": "Brazil",
"BS": "Bahamas",
"BT": "Bhutan",
"BU": "Burma",
"BV": "Bouvet Island",
"BW": "Botswana",
"BY": "Belarus",
"BZ": "Belize",
"CA": "Canada",
"CC": "Cocos Islands",
"CD": "Congo",
"CF": "Central African Republic",
"CG": "Congo",
"CH": "Switzerland",
"CI": "Côte d'Ivoire",
"CK": "Cook Islands",
"CL": "Chile",
"CM": "Cameroon",
"CN": "China",
"CO": "Colombia",
"CP": "Clipperton Island",
"CR": "Costa Rica",
"CS": "Serbia and Montenegro",
"CT": "Canton and Enderbury Islands",
"CU": "Cuba",
"CV": "Cabo Verde",
"CW": "Curacao",
"CX": "Christmas Island",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DD": "Germany",
"DE": "Germany",
"DG": "Diego Garcia",
"DJ": "Djibouti",
"DK": "Denmark",
"DM": "Dominica",
"DO": "Dominican Republic",
"DY": "Dahomey",
"DZ": "Algeria",
"EA": "Ceuta, Mulilla",
"EC": "Ecuador",
"EE": "Estonia",
"EG": "Egypt",
"EH": "Western Sahara",
"ER": "Eritrea",
"ES": "Spain",
"ET": "Ethiopia",
"FI": "Finland",
"FJ": "Fiji",
"FK": "Falkland Islands",
"FM": "Micronesia",
"FO": "Faroe Islands",
"FQ": "French Southern and Antarctic Territories",
"FR": "France",
"FX": "France, Metropolitan",
"GA": "Gabon",
"GB": "United Kingdom",
"GD": "Grenada",
"GE": "Georgia",
"GF": "French Guiana",
"GG": "Guernsey",
"GH": "Ghana",
"GI": "Gibraltar",
"GL": "Greenland",
"GM": "Gambia",
"GN": "Guinea",
"GP": "Guadeloupe",
"GQ": "Equatorial Guinea",
"GR": "Greece",
"GS": "South Georgia And The South Sandwich Islands",
"GT": "Guatemala",
"GU": "Guam",
"GW": "Guinea-bissau",
"GY": "Guyana",
"HK": "Hong Kong",
"HM": "Heard Island And McDonald Islands",
"HN": "Honduras",
"HR": "Croatia",
"HT": "Haiti",
"HU": "Hungary",
"HV": "Upper Volta",
"IC": "Canary Islands",
"ID": "Indonesia",
"IE": "Ireland",
"IL": "Israel",
"IM": "Isle Of Man",
"IN": "India",
"IO": "British Indian Ocean Territory",
"IQ": "Iraq",
"IR": "Iran",
"IS": "Iceland",
"IT": "Italy",
"JE": "Jersey",
"JM": "Jamaica",
"JO": "Jordan",
"JP": "Japan",
"JT": "Johnston Island",
"KE": "Kenya",
"KG": "Kyrgyzstan",
"KH": "Cambodia",
"KI": "Kiribati",
"KM": "Comoros",
"KN": "Saint Kitts And Nevis",
"KP": "Korea",
"KR": "Korea",
"KW": "Kuwait",
"KY": "Cayman Islands",
"KZ": "Kazakhstan",
"LA": "Laos",
"LB": "Lebanon",
"LC": "Saint Lucia",
"LI": "Liechtenstein",
"LK": "Sri Lanka",
"LR": "Liberia",
"LS": "Lesotho",
"LT": "Lithuania",
"LU": "Luxembourg",
"LV": "Latvia",
"LY": "Libya",
"MA": "Morocco",
"MC": "Monaco",
"MD": "Moldova",
"ME": "Montenegro",
"MF": "Saint Martin",
"MG": "Madagascar",
"MH": "Marshall Islands",
"MI": "Midway Islands",
"MK": "Macedonia",
"ML": "Mali",
"MM": "Myanmar",
"MN": "Mongolia",
"MO": "Macao",
"MP": "Northern Mariana Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MS": "Montserrat",
"MT": "Malta",
"MU": "Mauritius",
"MV": "Maldives",
"MW": "Malawi",
"MX": "Mexico",
"MY": "Malaysia",
"MZ": "Mozambique",
"NA": "Namibia",
"NC": "New Caledonia",
"NE": "Niger",
"NF": "Norfolk Island",
"NG": "Nigeria",
"NH": "New Hebrides",
"NI": "Nicaragua",
"NL": "Netherlands",
"NO": "Norway",
"NP": "Nepal",
"NQ": "Dronning Maud Land",
"NR": "Nauru",
"NT": "Neutral Zone",
"NU": "Niue",
"NZ": "New Zealand",
"OM": "Oman",
"PA": "Panama",
"PC": "Pacific Islands",
"PE": "Peru",
"PF": "French Polynesia",
"PG": "Papua New Guinea",
"PH": "Philippines",
"PK": "Pakistan",
"PL": "Poland",
"PM": "Saint Pierre And Miquelon",
"PN": "Pitcairn",
"PR": "Puerto Rico",
"PS": "Palestine",
"PT": "Portugal",
"PU": "U.S. Miscellaneous Pacific Islands",
"PW": "Palau",
"PY": "Paraguay",
"PZ": "Panama Canal Zone",
"QA": "Qatar",
"RE": "Reunion",
"RH": "Southern Rhodesia",
"RO": "Romania",
"RS": "Serbia",
"RU": "Russian Federation",
"RW": "Rwanda",
"SA": "Saudi Arabia",
"SB": "Solomon Islands",
"SC": "Seychelles",
"SD": "Sudan",
"SE": "Sweden",
"SG": "Singapore",
"SH": "Saint Helena, Ascension And Tristan Da Cunha",
"SI": "Slovenia",
"SJ": "Svalbard And Jan Mayen",
"SK": "Slovakia",
"SL": "Sierra Leone",
"SM": "San Marino",
"SN": "Senegal",
"SO": "Somalia",
"SR": "Suriname",
"SS": "South Sudan",
"ST": "Sao Tome and Principe",
"SU": "USSR",
"SV": "El Salvador",
"SX": "Sint Maarten",
"SY": "Syrian Arab Republic",
"SZ": "Swaziland",
"TA": "Tristan de Cunha",
"TC": "Turks And Caicos Islands",
"TD": "Chad",
"TF": "French Southern Territories",
"TG": "Togo",
"TH": "Thailand",
"TJ": "Tajikistan",
"TK": "Tokelau",
"TL": "Timor-Leste",
"TM": "Turkmenistan",
"TN": "Tunisia",
"TO": "Tonga",
"TP": "East Timor",
"TR": "Turkey",
"TT": "Trinidad And Tobago",
"TV": "Tuvalu",
"TW": "Taiwan",
"TZ": "Tanzania",
"UA": "Ukraine",
"UG": "Uganda",
"UM": "United States Minor Outlying Islands",
"US": "United States",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VA": "Vatican City State",
"VC": "Saint Vincent And The Grenadines",
"VD": "VietNam",
"VE": "Venezuela",
"VG": "Virgin Islands (British)",
"VI": "Virgin Islands (US)",
"VN": "VietNam",
"VU": "Vanuatu",
"WF": "Wallis And Futuna",
"WK": "Wake Island",
"WS": "Samoa",
"XK": "Kosovo",
"YD": "Yemen",
"YE": "Yemen",
"YT": "Mayotte",
"YU": "Yugoslavia",
"ZA": "South Africa",
"ZM": "Zambia",
"ZR": "Zaire",
"ZW": "Zimbabwe",
}
def get_country_code_autocomplete(text):
if text is None or len(text) == 0:
return []
results = []
for code in COUNTRIES:
if text.lower() in code.lower() \
or text.lower() in COUNTRIES[code].lower():
results.append(code)
return results
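
The lookup matches the text against both the ISO code and the country name, case-insensitively; for example:

    get_country_code_autocomplete("jap")  # -> ["JP"]  (matches "Japan")
    get_country_code_autocomplete("")     # -> []      (empty input short-circuits)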

@@ -91,7 +91,7 @@ def __get_sessions_list(project_id, user_id, data):
data.series[0].filter.endDate = data.endTimestamp
data.series[0].filter.page = data.page
data.series[0].filter.limit = data.limit
return sessions.search2_pg(data=data.series[0].filter, project_id=project_id, user_id=user_id)
return sessions.search_sessions(data=data.series[0].filter, project_id=project_id, user_id=user_id)
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema, user_id=None):
@@ -166,7 +166,7 @@ def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessi
s.filter.limit = data.limit
s.filter.page = data.page
results.append({"seriesId": s.series_id, "seriesName": s.name,
**sessions.search2_pg(data=s.filter, project_id=project_id, user_id=user_id)})
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results
@@ -213,7 +213,7 @@ def try_sessions(project_id, user_id, data: schemas.CustomMetricSessionsPayloadS
s.filter.limit = data.limit
s.filter.page = data.page
results.append({"seriesId": None, "seriesName": s.name,
**sessions.search2_pg(data=s.filter, project_id=project_id, user_id=user_id)})
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results
@@ -532,7 +532,7 @@ def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
"lostConversions": 0,
"unaffectedSessions": 0}
return {"seriesId": s.series_id, "seriesName": s.name,
"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id,
issue=issue, data=s.filter)
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
issue=issue, data=s.filter)
if issue is not None else {"total": 0, "sessions": []},
"issue": issue}

@@ -304,7 +304,9 @@ def make_chart_metrics(project_id, user_id, metric_id, data: schemas.CustomMetri
include_dashboard=False)
if raw_metric is None:
return None
metric = schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)
metric: schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)
if metric.is_template and metric.predefined_key is None:
return None
if metric.is_template:
return get_predefined_metric(key=metric.predefined_key, project_id=project_id, data=data.dict())
else:
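
The one-character change above fixes a real bug: the old line was a chained assignment, not a type annotation. A minimal illustration (not the project's code):

    # Old (buggy) form - chained assignment rebinds the class attribute:
    #     metric = schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)
    # After it runs, schemas.CustomMetricAndTemplate is an instance, so every
    # later parse_obj() lookup on it fails.
    # New (correct) form - a plain variable annotation:
    #     metric: schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)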

@@ -251,10 +251,7 @@ def get_details(project_id, error_id, user_id, **data):
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE pe.error_id = fe.error_id
AND fe.user_id = %(user_id)s), FALSE) AS favorite,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
@@ -424,10 +421,11 @@ def __get_sort_key(key):
}.get(key, 'max_datetime')
def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
empty_response = {'total': 0,
'errors': []
}
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
empty_response = {
'total': 0,
'errors': []
}
platform = None
for f in data.filters:
@@ -449,17 +447,12 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
data.endDate = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0:
print("-- searching for sessions before errors")
# if favorite_only=True search for sessions associated with favorite_error
statuses = sessions.search2_pg(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=data.status)
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response
error_ids = [e["errorId"] for e in statuses]
with pg_client.PostgresClient() as cur:
if data.startDate is None:
data.startDate = TimeUTC.now(-7)
if data.endDate is None:
data.endDate = TimeUTC.now()
step_size = __get_step_size(data.startDate, data.endDate, data.density, factor=1)
sort = __get_sort_key('datetime')
if data.sort is not None:
@@ -488,9 +481,9 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
pg_sub_query.append("error_id IN %(error_ids)s")
if data.bookmarked:
pg_sub_query.append("ufe.user_id = %(userId)s")
extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
# if data.bookmarked:
# pg_sub_query.append("ufe.user_id = %(userId)s")
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
if data.query is not None and len(data.query) > 0:
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
params["error_query"] = helper.values_for_operator(value=data.query,
@@ -509,7 +502,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_uuid) AS users,
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(timestamp) AS max_datetime,
MIN(timestamp) AS min_datetime
@@ -544,19 +537,13 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
cur.execute(cur.mogrify(main_pg_query, params))
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if flows:
return {"count": total}
if total == 0:
rows = []
else:
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id, status, parent_error_id, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE errors.error_id = fe.error_id
AND fe.user_id = %(user_id)s LIMIT 1), FALSE) AS favorite,
"""SELECT error_id,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE errors.error_id = ve.error_id
@@ -574,26 +561,12 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
for r in rows:
r.pop("full_count")
if r["error_id"] in statuses:
r["status"] = statuses[r["error_id"]]["status"]
r["parent_error_id"] = statuses[r["error_id"]]["parentErrorId"]
r["favorite"] = statuses[r["error_id"]]["favorite"]
r["viewed"] = statuses[r["error_id"]]["viewed"]
r["stack"] = format_first_stack_frame(statuses[r["error_id"]])["stack"]
else:
r["status"] = "untracked"
r["parent_error_id"] = None
r["favorite"] = False
r["viewed"] = False
r["stack"] = None
offset = len(rows)
rows = [r for r in rows if r["stack"] is None
or (len(r["stack"]) == 0 or len(r["stack"]) > 1
or len(r["stack"]) > 0
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
'total': total - offset,
'total': total,
'errors': helper.list_to_camel_case(rows)
}
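
The new users count prefers the application-level user_id and falls back to the device UUID only when it is NULL; a tiny sketch of the difference (rows are illustrative):

    rows = [("u1", "uuid-a"), ("u1", "uuid-b"), (None, "uuid-c")]
    # old: COUNT(DISTINCT user_uuid)                          -> 3
    # new: COUNT(DISTINCT COALESCE(user_id, user_uuid::text)) -> 2
    distinct_users = {uid if uid is not None else uuid for uid, uuid in rows}
    assert distinct_users == {"u1", "uuid-c"}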

@@ -0,0 +1,48 @@
from chalicelib.utils import pg_client
def add_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
VALUES (%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": True}
def remove_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""DELETE FROM public.user_favorite_errors
WHERE
user_id = %(userId)s
AND error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": False}
def favorite_error(project_id, user_id, error_id):
exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
if not exists:
return {"errors": ["cannot bookmark non-rehydrated errors"]}
if favorite:
return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
def error_exists_and_favorite(user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT errors.error_id AS exists, ufe.error_id AS favorite
FROM public.errors
LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
WHERE error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
r = cur.fetchone()
if r is None:
return False, False
return True, r.get("favorite") is not None
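
favorite_error behaves as a toggle; a usage sketch (IDs are illustrative):

    favorite_error(project_id=1, user_id=7, error_id="e-123")
    # -> {"errorId": "e-123", "favorite": True}   on the first call
    # -> {"errorId": "e-123", "favorite": False}  on the next call
    # -> {"errors": ["cannot bookmark non-rehydrated errors"]} if the row is missing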

@@ -1,91 +0,0 @@
from chalicelib.utils import pg_client
def add_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
INSERT INTO public.user_favorite_errors
(user_id, error_id)
VALUES
(%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": True}
def remove_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
DELETE FROM public.user_favorite_errors
WHERE
user_id = %(userId)s
AND error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": False}
def favorite_error(project_id, user_id, error_id):
exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
if not exists:
return {"errors": ["cannot bookmark non-rehydrated errors"]}
if favorite:
return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
def error_exists_and_favorite(user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT errors.error_id AS exists, ufe.error_id AS favorite
FROM public.errors
LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
WHERE error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
r = cur.fetchone()
if r is None:
return False, False
return True, r.get("favorite") is not None
def add_viewed_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.user_viewed_errors
(user_id, error_id)
VALUES
(%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
def viewed_error_exists(user_id, error_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT
errors.error_id AS hydrated,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE ve.error_id = %(error_id)s
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE error_id = %(error_id)s""",
{"userId": user_id, "error_id": error_id})
cur.execute(
query=query
)
r = cur.fetchone()
if r:
return r.get("viewed")
return True
def viewed_error(project_id, user_id, error_id):
if viewed_error_exists(user_id=user_id, error_id=error_id):
return None
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)

@@ -0,0 +1,37 @@
from chalicelib.utils import pg_client
def add_viewed_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
VALUES (%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
def viewed_error_exists(user_id, error_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT
errors.error_id AS hydrated,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE ve.error_id = %(error_id)s
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE error_id = %(error_id)s""",
{"userId": user_id, "error_id": error_id})
cur.execute(
query=query
)
r = cur.fetchone()
if r:
return r.get("viewed")
return True
def viewed_error(project_id, user_id, error_id):
if viewed_error_exists(user_id=user_id, error_id=error_id):
return None
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)

@@ -1,10 +1,14 @@
import schemas
from chalicelib.core import issues
from chalicelib.core import sessions_metas, metadata
from chalicelib.core import metadata
from chalicelib.core import sessions_metas
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
from chalicelib.core import autocomplete
def get_customs_by_sessionId2_pg(session_id, project_id):
with pg_client.PostgresClient() as cur:
@@ -92,11 +96,6 @@ def get_by_sessionId2_pg(session_id, project_id, group_clickrage=False):
return rows
def __get_data_for_extend(data):
if "errors" not in data:
return data["data"]
def __pg_errors_query(source=None, value_length=None):
if value_length is None or value_length > 2:
return f"""((SELECT DISTINCT ON(lg.message)
@@ -110,7 +109,7 @@ def __pg_errors_query(source=None, value_length=None):
AND lg.project_id = %(project_id)s
{"AND source = %(source)s" if source is not None else ""}
LIMIT 5)
UNION ALL
UNION DISTINCT
(SELECT DISTINCT ON(lg.name)
lg.name AS value,
source,
@@ -122,7 +121,7 @@ def __pg_errors_query(source=None, value_length=None):
AND lg.project_id = %(project_id)s
{"AND source = %(source)s" if source is not None else ""}
LIMIT 5)
UNION
UNION DISTINCT
(SELECT DISTINCT ON(lg.message)
lg.message AS value,
source,
@@ -134,7 +133,7 @@ def __pg_errors_query(source=None, value_length=None):
AND lg.project_id = %(project_id)s
{"AND source = %(source)s" if source is not None else ""}
LIMIT 5)
UNION ALL
UNION DISTINCT
(SELECT DISTINCT ON(lg.name)
lg.name AS value,
source,
@@ -157,7 +156,7 @@ def __pg_errors_query(source=None, value_length=None):
AND lg.project_id = %(project_id)s
{"AND source = %(source)s" if source is not None else ""}
LIMIT 5)
UNION ALL
UNION DISTINCT
(SELECT DISTINCT ON(lg.name)
lg.name AS value,
source,
@@ -177,8 +176,7 @@ def __search_pg_errors(project_id, value, key=None, source=None):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(__pg_errors_query(source,
value_length=len(value) \
if SUPPORTED_TYPES[event_type.ERROR.ui_type].change_by_length else None),
value_length=len(value)),
{"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value),
"source": source}))
@@ -189,7 +187,7 @@ def __search_pg_errors(project_id, value, key=None, source=None):
def __search_pg_errors_ios(project_id, value, key=None, source=None):
now = TimeUTC.now()
if SUPPORTED_TYPES[event_type.ERROR_IOS.ui_type].change_by_length is False or len(value) > 2:
if len(value) > 2:
query = f"""(SELECT DISTINCT ON(lg.reason)
lg.reason AS value,
'{event_type.ERROR_IOS.ui_type}' AS type
@@ -268,7 +266,7 @@ def __search_pg_metadata(project_id, value, key=None, source=None):
for k in meta_keys.keys():
colname = metadata.index_to_colname(meta_keys[k])
if SUPPORTED_TYPES[event_type.METADATA.ui_type].change_by_length is False or len(value) > 2:
if len(value) > 2:
sub_from.append(f"""((SELECT DISTINCT ON ({colname}) {colname} AS value, '{k}' AS key
FROM public.sessions
WHERE project_id = %(project_id)s
@@ -294,48 +292,6 @@ def __search_pg_metadata(project_id, value, key=None, source=None):
return results
def __generic_query(typename, value_length=None):
if value_length is None or value_length > 2:
return f"""(SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
LIMIT 5)
UNION
(SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(value)s
LIMIT 5);"""
return f"""SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
LIMIT 10;"""
def __generic_autocomplete(event: Event):
def f(project_id, value, key=None, source=None):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
__generic_query(event.ui_type,
value_length=len(value) \
if SUPPORTED_TYPES[event.ui_type].change_by_length \
else None),
{"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}))
return helper.list_to_camel_case(cur.fetchall())
return f
class event_type:
CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label")
INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label")
@@ -358,99 +314,65 @@ class event_type:
SUPPORTED_TYPES = {
event_type.CLICK.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.CLICK),
query=__generic_query(typename=event_type.CLICK.ui_type),
change_by_length=True),
event_type.INPUT.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.INPUT),
query=__generic_query(typename=event_type.INPUT.ui_type),
change_by_length=True),
event_type.LOCATION.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.LOCATION),
query=__generic_query(typename=event_type.LOCATION.ui_type),
change_by_length=True),
event_type.CUSTOM.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.CUSTOM),
query=__generic_query(typename=event_type.CUSTOM.ui_type),
change_by_length=True),
event_type.REQUEST.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.REQUEST),
query=__generic_query(typename=event_type.REQUEST.ui_type),
change_by_length=True),
event_type.GRAPHQL.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.GRAPHQL),
query=__generic_query(typename=event_type.GRAPHQL.ui_type),
change_by_length=True),
event_type.STATEACTION.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.STATEACTION),
query=__generic_query(typename=event_type.STATEACTION.ui_type),
change_by_length=True),
event_type.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.CLICK),
query=autocomplete.__generic_query(typename=event_type.CLICK.ui_type)),
event_type.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.INPUT),
query=autocomplete.__generic_query(typename=event_type.INPUT.ui_type)),
event_type.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.LOCATION),
query=autocomplete.__generic_query(
typename=event_type.LOCATION.ui_type)),
event_type.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.CUSTOM),
query=autocomplete.__generic_query(typename=event_type.CUSTOM.ui_type)),
event_type.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.REQUEST),
query=autocomplete.__generic_query(
typename=event_type.REQUEST.ui_type)),
event_type.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.GRAPHQL),
query=autocomplete.__generic_query(
typename=event_type.GRAPHQL.ui_type)),
event_type.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.STATEACTION),
query=autocomplete.__generic_query(
typename=event_type.STATEACTION.ui_type)),
event_type.ERROR.ui_type: SupportedFilter(get=__search_pg_errors,
query=None, change_by_length=True),
query=None),
event_type.METADATA.ui_type: SupportedFilter(get=__search_pg_metadata,
query=None, change_by_length=True),
query=None),
# IOS
event_type.CLICK_IOS.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.CLICK_IOS),
query=__generic_query(typename=event_type.CLICK_IOS.ui_type),
change_by_length=True),
event_type.INPUT_IOS.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.INPUT_IOS),
query=__generic_query(typename=event_type.INPUT_IOS.ui_type),
change_by_length=True),
event_type.VIEW_IOS.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.VIEW_IOS),
query=__generic_query(typename=event_type.VIEW_IOS.ui_type),
change_by_length=True),
event_type.CUSTOM_IOS.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.CUSTOM_IOS),
query=__generic_query(typename=event_type.CUSTOM_IOS.ui_type),
change_by_length=True),
event_type.REQUEST_IOS.ui_type: SupportedFilter(get=__generic_autocomplete(event_type.REQUEST_IOS),
query=__generic_query(typename=event_type.REQUEST_IOS.ui_type),
change_by_length=True),
event_type.CLICK_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.CLICK_IOS),
query=autocomplete.__generic_query(
typename=event_type.CLICK_IOS.ui_type)),
event_type.INPUT_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.INPUT_IOS),
query=autocomplete.__generic_query(
typename=event_type.INPUT_IOS.ui_type)),
event_type.VIEW_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.VIEW_IOS),
query=autocomplete.__generic_query(
typename=event_type.VIEW_IOS.ui_type)),
event_type.CUSTOM_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.CUSTOM_IOS),
query=autocomplete.__generic_query(
typename=event_type.CUSTOM_IOS.ui_type)),
event_type.REQUEST_IOS.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(event_type.REQUEST_IOS),
query=autocomplete.__generic_query(
typename=event_type.REQUEST_IOS.ui_type)),
event_type.ERROR_IOS.ui_type: SupportedFilter(get=__search_pg_errors_ios,
query=None, change_by_length=True),
query=None),
}
def __get_autocomplete_table(value, project_id):
autocomplete_events = [schemas.FilterType.rev_id,
schemas.EventType.click,
schemas.FilterType.user_device,
schemas.FilterType.user_id,
schemas.FilterType.user_browser,
schemas.FilterType.user_os,
schemas.EventType.custom,
schemas.FilterType.user_country,
schemas.EventType.location,
schemas.EventType.input]
autocomplete_events.sort()
sub_queries = []
for e in autocomplete_events:
sub_queries.append(f"""(SELECT type, value
FROM public.autocomplete
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(svalue)s
LIMIT 5)""")
if len(value) > 2:
sub_queries.append(f"""(SELECT type, value
FROM public.autocomplete
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(value)s
LIMIT 5)""")
def get_errors_by_session_id(session_id, project_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(" UNION ".join(sub_queries) + ";",
{"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)})
try:
cur.execute(query)
except Exception as err:
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(query.decode('UTF-8'))
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
results = helper.list_to_camel_case(cur.fetchall())
return results
cur.execute(cur.mogrify(f"""\
SELECT er.*,ur.*, er.timestamp - s.start_ts AS time
FROM {event_type.ERROR.table} AS er INNER JOIN public.errors AS ur USING (error_id) INNER JOIN public.sessions AS s USING (session_id)
WHERE er.session_id = %(session_id)s AND s.project_id=%(project_id)s
ORDER BY timestamp;""", {"session_id": session_id, "project_id": project_id}))
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
return helper.list_to_camel_case(errors)
def search(text, event_type, project_id, source, key):
if not event_type:
return {"data": __get_autocomplete_table(text, project_id)}
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
if event_type in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
@@ -470,16 +392,3 @@ def search(text, event_type, project_id, source, key):
return {"errors": ["unsupported event"]}
return {"data": rows}
def get_errors_by_session_id(session_id, project_id):
with pg_client.PostgresClient() as cur:
cur.execute(cur.mogrify(f"""\
SELECT er.*,ur.*, er.timestamp - s.start_ts AS time
FROM {event_type.ERROR.table} AS er INNER JOIN public.errors AS ur USING (error_id) INNER JOIN public.sessions AS s USING (session_id)
WHERE er.session_id = %(session_id)s AND s.project_id=%(project_id)s
ORDER BY timestamp;""", {"session_id": session_id, "project_id": project_id}))
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
return helper.list_to_camel_case(errors)

@@ -138,8 +138,8 @@ def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date
get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date,
end_date=end_date)
counts = sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(row["filter"]),
project_id=project_id, user_id=None, count_only=True)
counts = sessions.search_sessions(data=schemas.SessionsSearchPayloadSchema.parse_obj(row["filter"]),
project_id=project_id, user_id=None, count_only=True)
row["sessionsCount"] = counts["countSessions"]
row["usersCount"] = counts["countUsers"]
filter_clone = dict(row["filter"])
@@ -193,8 +193,8 @@ def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=No
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
return sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(f["filter"]), project_id=project_id,
user_id=user_id)
return sessions.search_sessions(data=schemas.SessionsSearchPayloadSchema.parse_obj(f["filter"]), project_id=project_id,
user_id=user_id)
def get_sessions_on_the_fly(funnel_id, project_id, user_id, data: schemas.FunnelSearchPayloadSchema):
@@ -207,8 +207,8 @@ def get_sessions_on_the_fly(funnel_id, project_id, user_id, data: schemas.Funnel
get_start_end_time(filter_d=f["filter"], range_value=data.range_value,
start_date=data.startDate, end_date=data.endDate)
data = schemas.FunnelSearchPayloadSchema.parse_obj(f["filter"])
return sessions.search2_pg(data=data, project_id=project_id,
user_id=user_id)
return sessions.search_sessions(data=data, project_id=project_id,
user_id=user_id)
def get_top_insights(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None):
@@ -365,8 +365,8 @@ def search_by_issue(user_id, project_id, funnel_id, issue_id, data: schemas.Funn
if i.get("issueId", "") == issue_id:
issue = i
break
return {"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id, issue=issue,
data=data) if issue is not None else {"total": 0, "sessions": []},
return {"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id, issue=issue,
data=data) if issue is not None else {"total": 0, "sessions": []},
# "stages": helper.list_to_camel_case(insights),
# "totalDropDueToIssues": total_drop_due_to_issues,
"issue": issue}

@@ -7,13 +7,18 @@ class BaseIntegration(ABC):
def __init__(self, user_id, ISSUE_CLASS):
self._user_id = user_id
self.issue_handler = ISSUE_CLASS(self.integration_token)
self.__issue_handler = ISSUE_CLASS(self.integration_token)
@property
@abstractmethod
def provider(self):
pass
@property
@abstractmethod
def issue_handler(self):
pass
@property
def integration_token(self):
integration = self.get()
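
Declaring issue_handler as an abstract property forces every provider to expose its own handler; the pattern, reduced to a minimal sketch (class names are illustrative):

    from abc import ABC, abstractmethod

    class BaseIntegration(ABC):
        @property
        @abstractmethod
        def issue_handler(self):
            ...

    class DummyIntegration(BaseIntegration):
        @property
        def issue_handler(self):
            return None  # a concrete provider returns its issue client here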

@@ -1,8 +1,9 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper
PROVIDER = "GITHUB"
PROVIDER = schemas.IntegrationType.github
class GitHubIntegration(integration_base.BaseIntegration):
@@ -15,6 +16,10 @@ class GitHubIntegration(integration_base.BaseIntegration):
def provider(self):
return PROVIDER
@property
def issue_handler(self):
return
def get_obfuscated(self):
integration = self.get()
if integration is None:

@@ -1,8 +1,9 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper
PROVIDER = "JIRA"
PROVIDER = schemas.IntegrationType.jira
def obfuscate_string(string):
@@ -16,21 +17,29 @@ class JIRAIntegration(integration_base.BaseIntegration):
# super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
self._user_id = user_id
self.integration = self.get()
if self.integration is None:
return
self.integration["valid"] = True
try:
self.issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
username=self.integration["username"],
url=self.integration["url"])
except Exception as e:
self.issue_handler = None
if not self.integration["url"].endswith('atlassian.net'):
self.integration["valid"] = False
@property
def provider(self):
return PROVIDER
@property
def issue_handler(self):
if self.integration["url"].endswith('atlassian.net') and self.__issue_handler is None:
try:
self.__issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
username=self.integration["username"],
url=self.integration["url"])
except Exception as e:
self.__issue_handler = None
self.integration["valid"] = False
return self.__issue_handler
# TODO: remove this once jira-oauth is done
def get(self):
with pg_client.PostgresClient() as cur:
@@ -41,7 +50,14 @@ class JIRAIntegration(integration_base.BaseIntegration):
WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
return helper.dict_to_camel_case(cur.fetchone())
data = helper.dict_to_camel_case(cur.fetchone())
if data is None:
return
data["valid"] = True
if not data["url"].endswith('atlassian.net'):
data["valid"] = False
return data
def get_obfuscated(self):
if self.integration is None:
@@ -66,7 +82,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
w = helper.dict_to_camel_case(cur.fetchone())
if obfuscate:
w["token"] = obfuscate_string(w["token"])
return w
return self.get()
# TODO: make this generic for all issue tracking integrations
def _add(self, data):
@@ -84,7 +100,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
"token": token, "url": url})
)
w = helper.dict_to_camel_case(cur.fetchone())
return w
return self.get()
def delete(self):
with pg_client.PostgresClient() as cur:
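
The handler is now built lazily and cached in the private attribute, so a bad token or URL surfaces on first use rather than in __init__; the core of the pattern, stripped of the JIRA specifics (illustrative; _connect is an assumed placeholder):

    class LazyHandler:
        def __init__(self):
            self.__handler = None

        @property
        def handler(self):
            if self.__handler is None:
                self.__handler = self._connect()  # may flag the integration invalid
            return self.__handler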

@@ -0,0 +1,61 @@
import schemas
from chalicelib.utils import pg_client
def get_global_integrations_status(tenant_id, user_id, project_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
SELECT EXISTS((SELECT 1
FROM public.oauth_authentication
WHERE user_id = %(user_id)s
AND provider = 'github')) AS {schemas.IntegrationType.github},
EXISTS((SELECT 1
FROM public.jira_cloud
WHERE user_id = %(user_id)s)) AS {schemas.IntegrationType.jira},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='bugsnag')) AS {schemas.IntegrationType.bugsnag},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='cloudwatch')) AS {schemas.IntegrationType.cloudwatch},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='datadog')) AS {schemas.IntegrationType.datadog},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='newrelic')) AS {schemas.IntegrationType.newrelic},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='rollbar')) AS {schemas.IntegrationType.rollbar},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='sentry')) AS {schemas.IntegrationType.sentry},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='stackdriver')) AS {schemas.IntegrationType.stackdriver},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='sumologic')) AS {schemas.IntegrationType.sumologic},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s
AND provider='elasticsearch')) AS {schemas.IntegrationType.elasticsearch},
EXISTS((SELECT 1
FROM public.webhooks
WHERE type='slack')) AS {schemas.IntegrationType.slack};""",
{"user_id": user_id, "tenant_id": tenant_id, "project_id": project_id})
)
current_integrations = cur.fetchone()
result = []
for k in current_integrations.keys():
result.append({"name": k, "integrated": current_integrations[k]})
return result
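The reshaping at the end of this new helper is a plain flags-to-list conversion; a minimal standalone sketch of the same step, with a hypothetical cursor row:

# current_integrations mimics the single row of booleans returned by the EXISTS query above.
current_integrations = {"GITHUB": False, "JIRA": True, "SLACK": True}
result = [{"name": k, "integrated": v} for k, v in current_integrations.items()]
print(result)
# [{'name': 'GITHUB', 'integrated': False}, {'name': 'JIRA', 'integrated': True},
#  {'name': 'SLACK', 'integrated': True}]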

View file

@@ -27,7 +27,7 @@ def __get_default_integration(user_id):
current_integrations["jira"] else None
def get_integration(tenant_id, user_id, tool=None):
def get_integration(tenant_id, user_id, tool=None, for_delete=False):
if tool is None:
tool = __get_default_integration(user_id=user_id)
if tool is None:
@@ -37,7 +37,7 @@ def get_integration(tenant_id, user_id, tool=None):
return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
if tool == integration_jira_cloud.PROVIDER:
integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
if integration.integration is not None and not integration.integration.get("valid", True):
if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
return None, integration
elif tool == integration_github.PROVIDER:

View file

@@ -765,8 +765,8 @@ def get_missing_resources_trend(project_id, startTimestamp=TimeUTC.now(delta_day
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True, chart=True, data=args)
pg_sub_query.append("resources.success = FALSE")
pg_sub_query_chart.append("resources.success = FALSE")
pg_sub_query.append("resources.type != 'fetch'")
pg_sub_query_chart.append("resources.type != 'fetch'")
pg_sub_query.append("resources.type = 'img'")
pg_sub_query_chart.append("resources.type = 'img'")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT
@@ -1580,27 +1580,27 @@ def get_domains_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
data=args, main_table="resources", time_column="timestamp", project=False,
data=args, main_table="requests", time_column="timestamp", project=False,
duration=False)
pg_sub_query_subset.append("resources.timestamp>=%(startTimestamp)s")
pg_sub_query_subset.append("resources.timestamp<%(endTimestamp)s")
pg_sub_query_subset.append("resources.status/100 = %(status_code)s")
pg_sub_query_subset.append("requests.timestamp>=%(startTimestamp)s")
pg_sub_query_subset.append("requests.timestamp<%(endTimestamp)s")
pg_sub_query_subset.append("requests.status/100 = %(status_code)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS(SELECT resources.url_host, timestamp
FROM events.resources INNER JOIN public.sessions USING (session_id)
pg_query = f"""WITH requests AS(SELECT requests.host, timestamp
FROM events_common.requests INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
)
SELECT generated_timestamp AS timestamp,
COALESCE(JSONB_AGG(resources) FILTER ( WHERE resources IS NOT NULL ), '[]'::JSONB) AS keys
COALESCE(JSONB_AGG(requests) FILTER ( WHERE requests IS NOT NULL ), '[]'::JSONB) AS keys
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL ( SELECT resources.url_host, COUNT(resources.*) AS count
FROM resources
LEFT JOIN LATERAL ( SELECT requests.host, COUNT(*) AS count
FROM requests
WHERE {" AND ".join(pg_sub_query_chart)}
GROUP BY url_host
GROUP BY host
ORDER BY count DESC
LIMIT 5
) AS resources ON (TRUE)
) AS requests ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;"""
params = {"project_id": project_id,
@@ -1625,37 +1625,37 @@ def get_domains_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
return result
def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(), density=6, **args):
def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(), density=6, **args):
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
data=args, main_table="resources", time_column="timestamp", project=False,
data=args, main_table="requests", time_column="timestamp", project=False,
duration=False)
pg_sub_query_subset.append("resources.status/100 = %(status_code)s")
pg_sub_query_subset.append("requests.status/100 = %(status_code)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS (SELECT resources.url_host, timestamp
FROM events.resources INNER JOIN public.sessions USING (session_id)
pg_query = f"""WITH requests AS (SELECT host, timestamp
FROM events_common.requests INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
)
SELECT generated_timestamp AS timestamp,
COALESCE(JSONB_AGG(resources) FILTER ( WHERE resources IS NOT NULL ), '[]'::JSONB) AS keys
COALESCE(JSONB_AGG(requests) FILTER ( WHERE requests IS NOT NULL ), '[]'::JSONB) AS keys
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL ( SELECT resources.url_host, COUNT(resources.url_host) AS count
FROM resources
LEFT JOIN LATERAL ( SELECT requests.host, COUNT(*) AS count
FROM requests
WHERE {" AND ".join(pg_sub_query_chart)}
GROUP BY url_host
GROUP BY host
ORDER BY count DESC
LIMIT 5
) AS resources ON (TRUE)
) AS requests ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;"""
params = {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp,
"step_size": step_size,
"status_code": 4, **__get_constraint_values(args)}
"status_code": status, **__get_constraint_values(args)}
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
rows = __nested_array_to_dict_array(rows)
@@ -1665,44 +1665,16 @@ def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1)
return rows
def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(), density=6, **args):
return __get_domains_errors_4xx_and_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
endTimestamp=endTimestamp, density=density, **args)
def get_domains_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(), density=6, **args):
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
data=args, main_table="resources", time_column="timestamp", project=False,
duration=False)
pg_sub_query_subset.append("resources.status/100 = %(status_code)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS (SELECT resources.url_host, timestamp
FROM events.resources INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
)
SELECT generated_timestamp AS timestamp,
COALESCE(JSONB_AGG(resources) FILTER ( WHERE resources IS NOT NULL ), '[]'::JSONB) AS keys
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL ( SELECT resources.url_host, COUNT(resources.url_host) AS count
FROM resources
WHERE {" AND ".join(pg_sub_query_chart)}
GROUP BY url_host
ORDER BY count DESC
LIMIT 5
) AS resources ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;"""
params = {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp,
"step_size": step_size,
"status_code": 5, **__get_constraint_values(args)}
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
rows = __nested_array_to_dict_array(rows)
neutral = __get_neutral(rows)
rows = __merge_rows_with_neutral(rows, neutral)
return rows
return __get_domains_errors_4xx_and_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
endTimestamp=endTimestamp, density=density, **args)
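Both wrappers lean on the `requests.status/100 = %(status_code)s` predicate, which buckets HTTP status codes by class through integer division; a quick illustration (status values arbitrary):

for status in (200, 301, 404, 418, 500, 503):
    print(status, "->", status // 100)  # 2, 3, 4, 4, 5, 5 -- all 4xx map to 4, all 5xx to 5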
def __nested_array_to_dict_array(rows, key="url_host", value="count"):
@@ -1747,15 +1719,15 @@ def get_slowest_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
def get_errors_per_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(), **args):
pg_sub_query = __get_constraints(project_id=project_id, data=args)
pg_sub_query.append("resources.success = FALSE")
pg_sub_query.append("requests.success = FALSE")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT
resources.url_host AS domain,
COUNT(resources.session_id) AS errors_count
FROM events.resources INNER JOIN sessions USING (session_id)
requests.host AS domain,
COUNT(requests.session_id) AS errors_count
FROM events_common.requests INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.url_host
GROUP BY requests.host
ORDER BY errors_count DESC
LIMIT 5;"""
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
@@ -1823,7 +1795,7 @@ def get_calls_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endT
FROM events.resources INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.method, resources.url_hostpath
ORDER BY (4 + 5), 3 DESC
ORDER BY (4 + 5) DESC, 3 DESC
LIMIT 50;"""
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
"startTimestamp": startTimestamp,
@@ -1832,50 +1804,45 @@ def get_calls_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endT
return helper.list_to_camel_case(rows)
def get_calls_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
platform=None, **args):
def __get_calls_errors_4xx_or_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
endTimestamp=TimeUTC.now(),
platform=None, **args):
pg_sub_query = __get_constraints(project_id=project_id, data=args)
pg_sub_query.append("resources.type = 'fetch'")
pg_sub_query.append("resources.method IS NOT NULL")
pg_sub_query.append("resources.status/100 = 4")
pg_sub_query.append("requests.type = 'fetch'")
pg_sub_query.append("requests.method IS NOT NULL")
pg_sub_query.append(f"requests.status/100 = {status}")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT resources.method,
resources.url_hostpath,
COUNT(resources.session_id) AS all_requests
FROM events.resources INNER JOIN sessions USING (session_id)
pg_query = f"""SELECT requests.method,
requests.host,
requests.path,
COUNT(requests.session_id) AS all_requests
FROM events_common.requests INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.method, resources.url_hostpath
GROUP BY requests.method, requests.host, requests.path
ORDER BY all_requests DESC
LIMIT 10;"""
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
rows = cur.fetchall()
for r in rows:
r["url_hostpath"] = r.pop("host") + r.pop("path")
return helper.list_to_camel_case(rows)
def get_calls_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
platform=None, **args):
return __get_calls_errors_4xx_or_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
endTimestamp=endTimestamp,
platform=platform, **args)
def get_calls_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
platform=None, **args):
pg_sub_query = __get_constraints(project_id=project_id, data=args)
pg_sub_query.append("resources.type = 'fetch'")
pg_sub_query.append("resources.method IS NOT NULL")
pg_sub_query.append("resources.status/100 = 5")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT resources.method,
resources.url_hostpath,
COUNT(resources.session_id) AS all_requests
FROM events.resources INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.method, resources.url_hostpath
ORDER BY all_requests DESC
LIMIT 10;"""
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
rows = cur.fetchall()
return helper.list_to_camel_case(rows)
return __get_calls_errors_4xx_or_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
endTimestamp=endTimestamp,
platform=platform, **args)
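The r["url_hostpath"] = r.pop("host") + r.pop("path") post-processing above keeps the old url_hostpath response shape while reading from the new split columns; a tiny sketch with a hypothetical row:

r = {"method": "GET", "host": "api.example.com", "path": "/v1/users", "all_requests": 7}
r["url_hostpath"] = r.pop("host") + r.pop("path")
print(r)  # {'method': 'GET', 'all_requests': 7, 'url_hostpath': 'api.example.com/v1/users'}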
def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
@@ -1883,10 +1850,9 @@ def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), e
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args)
pg_sub_query_subset.append("resources.timestamp>=%(startTimestamp)s")
pg_sub_query_subset.append("resources.timestamp<%(endTimestamp)s")
pg_sub_query_subset.append("resources.type != 'fetch'")
pg_sub_query_subset.append("resources.status > 200")
pg_sub_query_subset.append("requests.timestamp>=%(startTimestamp)s")
pg_sub_query_subset.append("requests.timestamp<%(endTimestamp)s")
pg_sub_query_subset.append("requests.status_code > 200")
pg_sub_query_subset_e = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
time_constraint=False)
@@ -1897,8 +1863,8 @@ def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), e
pg_sub_query_subset_e.append("timestamp<%(endTimestamp)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS (SELECT status, timestamp
FROM events.resources
pg_query = f"""WITH requests AS (SELECT status_code AS status, timestamp
FROM events_common.requests
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
),
@@ -1927,7 +1893,7 @@ def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), e
), 0) AS integrations
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT status
FROM resources
FROM requests
WHERE {" AND ".join(pg_sub_query_chart)}
) AS errors_partition ON (TRUE)
GROUP BY timestamp
@@ -2169,44 +2135,44 @@ def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1)
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
chart=False, data=args)
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
chart=True, data=args, main_table="resources", time_column="timestamp",
chart=True, data=args, main_table="requests", time_column="timestamp",
duration=False)
pg_sub_query_subset.append("resources.timestamp >= %(startTimestamp)s")
pg_sub_query_subset.append("resources.timestamp < %(endTimestamp)s")
pg_sub_query_subset.append("resources.success = FALSE")
pg_sub_query_subset.append("requests.timestamp >= %(startTimestamp)s")
pg_sub_query_subset.append("requests.timestamp < %(endTimestamp)s")
# pg_sub_query_subset.append("resources.type IN ('fetch', 'script')")
pg_sub_query_subset.append("requests.success = FALSE")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS (
SELECT resources.url_host, timestamp
FROM events.resources
pg_query = f"""WITH requests AS (
SELECT requests.host, timestamp
FROM events_common.requests
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
)
SELECT generated_timestamp AS timestamp,
SUM(CASE WHEN first.url_host = sub_resources.url_host THEN 1 ELSE 0 END) AS first_party,
SUM(CASE WHEN first.url_host != sub_resources.url_host THEN 1 ELSE 0 END) AS third_party
SUM(CASE WHEN first.host = sub_requests.host THEN 1 ELSE 0 END) AS first_party,
SUM(CASE WHEN first.host != sub_requests.host THEN 1 ELSE 0 END) AS third_party
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN (
SELECT resources.url_host,
COUNT(resources.session_id) AS count
FROM events.resources
SELECT requests.host,
COUNT(requests.session_id) AS count
FROM events_common.requests
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = '1'
AND resources.type IN ('fetch', 'script')
AND sessions.start_ts > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
AND sessions.start_ts < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
AND resources.timestamp > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
AND resources.timestamp < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
AND requests.timestamp > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
AND requests.timestamp < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
AND sessions.duration>0
GROUP BY resources.url_host
GROUP BY requests.host
ORDER BY count DESC
LIMIT 1
) AS first ON (TRUE)
LEFT JOIN LATERAL (
SELECT resources.url_host
FROM resources
SELECT requests.host
FROM requests
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sub_resources ON (TRUE)
) AS sub_requests ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;"""
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,

View file

@@ -43,25 +43,53 @@ def __create(tenant_id, name):
def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False):
with pg_client.PostgresClient() as cur:
recorded_q = ""
extra_projection = ""
extra_join = ""
if gdpr:
extra_projection += ',s.gdpr'
if recorded:
recorded_q = """, COALESCE((SELECT TRUE
FROM public.sessions
WHERE sessions.project_id = s.project_id
AND sessions.start_ts >= (EXTRACT(EPOCH FROM s.created_at) * 1000 - 24 * 60 * 60 * 1000)
AND sessions.start_ts <= %(now)s
LIMIT 1), FALSE) AS recorded"""
query = cur.mogrify(f"""SELECT
s.project_id, s.name, s.project_key, s.save_request_payloads
{',s.gdpr' if gdpr else ''}
{recorded_q}
{',stack_integrations.count>0 AS stack_integrations' if stack_integrations else ''}
extra_projection += """, COALESCE(nullif(EXTRACT(EPOCH FROM s.first_recorded_session_at) * 1000, NULL)::BIGINT,
(SELECT MIN(sessions.start_ts)
FROM public.sessions
WHERE sessions.project_id = s.project_id
AND sessions.start_ts >= (EXTRACT(EPOCH FROM
COALESCE(s.sessions_last_check_at, s.created_at)) * 1000-24*60*60*1000)
AND sessions.start_ts <= %(now)s
LIMIT 1), NULL) AS first_recorded"""
if stack_integrations:
extra_projection += ',stack_integrations.count>0 AS stack_integrations'
if stack_integrations:
extra_join = """LEFT JOIN LATERAL (SELECT COUNT(*) AS count
FROM public.integrations
WHERE s.project_id = integrations.project_id
LIMIT 1) AS stack_integrations ON TRUE"""
query = cur.mogrify(f"""{"SELECT *, first_recorded IS NOT NULL AS recorded FROM (" if recorded else ""}
SELECT s.project_id, s.name, s.project_key, s.save_request_payloads, s.first_recorded_session_at
{extra_projection}
FROM public.projects AS s
{'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''}
{extra_join}
WHERE s.deleted_at IS NULL
ORDER BY s.project_id;""", {"now": TimeUTC.now()})
ORDER BY s.project_id {") AS raw" if recorded else ""};""", {"now": TimeUTC.now()})
cur.execute(query)
rows = cur.fetchall()
# if recorded is requested, check if it was saved or computed
if recorded:
for r in rows:
if r["first_recorded_session_at"] is None:
extra_update = ""
if r["recorded"]:
extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
query = cur.mogrify(f"""UPDATE public.projects
SET sessions_last_check_at=(now() at time zone 'utc')
{extra_update}
WHERE project_id=%(project_id)s""",
{"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
cur.execute(query)
r.pop("first_recorded_session_at")
r.pop("first_recorded")
if recording_state:
project_ids = [f'({r["project_id"]})' for r in rows]
query = cur.mogrify(f"""SELECT projects.project_id, COALESCE(MAX(start_ts), 0) AS last

View file

@@ -2,7 +2,7 @@ from typing import List
import schemas
from chalicelib.core import events, metadata, events_ios, \
sessions_mobs, issues, projects, errors, resources, assist, performance_event
sessions_mobs, issues, projects, errors, resources, assist, performance_event, sessions_viewed, sessions_favorite
from chalicelib.utils import pg_client, helper, metrics_helper
SESSION_PROJECTION_COLS = """s.project_id,
@@ -172,8 +172,12 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._is_undefined]
def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
# This function executes the query and returns the result
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
if data.bookmarked:
data.startDate, data.endDate = sessions_favorite.get_start_end_timestamp(project_id, user_id)
full_args, query_part = search_query_parts(data=data, error_status=error_status, errors_only=errors_only,
favorite_only=data.bookmarked, issue=issue, project_id=project_id,
user_id=user_id)
@@ -187,16 +191,12 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
meta_keys = []
with pg_client.PostgresClient() as cur:
if errors_only:
main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id, ser.status, ser.parent_error_id, ser.payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s), FALSE) AS favorite,
COALESCE((SELECT TRUE
main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE er.error_id = ve.error_id
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
{query_part};""", full_args)
{query_part};""", full_args)
elif count_only:
main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
@@ -401,6 +401,7 @@ def __is_valid_event(is_any: bool, event: schemas._SessionSearchEventSchema):
event.filters is None or len(event.filters) == 0))
# this function generates the query and returns the generated query along with the dict of query arguments
def search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, user_id, extra_event=None):
ss_constraints = []
full_args = {"project_id": project_id, "startDate": data.startDate, "endDate": data.endDate,
@@ -522,12 +523,12 @@ def search_query_parts(data, error_status, errors_only, favorite_only, issue, pr
ss_constraints.append("ms.duration <= %(maxDuration)s")
full_args["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
# extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
if is_any:
extra_constraints.append('p.base_referrer IS NOT NULL')
extra_constraints.append('s.base_referrer IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
_multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
elif filter_type == events.event_type.METADATA.ui_type:
# get metadata list only if you need it
if meta_keys is None:
@@ -717,7 +718,7 @@ def search_query_parts(data, error_status, errors_only, favorite_only, issue, pr
event_where.append(
_multiple_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
event.value, value_key=e_k))
if event.source[0] not in [None, "*", ""]:
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
event_where.append(_multiple_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))
@@ -989,13 +990,13 @@ def search_query_parts(data, error_status, errors_only, favorite_only, issue, pr
extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)"
extra_constraints.append("ser.source = 'js_exception'")
extra_constraints.append("ser.project_id = %(project_id)s")
if error_status != schemas.ErrorStatus.all:
extra_constraints.append("ser.status = %(error_status)s")
full_args["error_status"] = error_status
if favorite_only:
extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
extra_constraints.append("ufe.user_id = %(userId)s")
# extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints]
# if error_status != schemas.ErrorStatus.all:
# extra_constraints.append("ser.status = %(error_status)s")
# full_args["error_status"] = error_status
# if favorite_only:
# extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
# extra_constraints.append("ufe.user_id = %(userId)s")
if favorite_only and not errors_only and user_id is not None:
extra_from += """INNER JOIN (SELECT user_id, session_id
FROM public.user_favorite_sessions

View file

@@ -6,10 +6,8 @@ def add_favorite_session(project_id, user_id, session_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
INSERT INTO public.user_favorite_sessions
(user_id, session_id)
VALUES
(%(userId)s,%(sessionId)s);""",
INSERT INTO public.user_favorite_sessions(user_id, session_id)
VALUES (%(userId)s,%(sessionId)s);""",
{"userId": user_id, "sessionId": session_id})
)
return sessions.get_by_id2_pg(project_id=project_id, session_id=session_id, user_id=user_id, full_data=False,
@@ -21,8 +19,7 @@ def remove_favorite_session(project_id, user_id, session_id):
cur.execute(
cur.mogrify(f"""\
DELETE FROM public.user_favorite_sessions
WHERE
user_id = %(userId)s
WHERE user_id = %(userId)s
AND session_id = %(sessionId)s;""",
{"userId": user_id, "sessionId": session_id})
)
@@ -30,19 +27,6 @@ def remove_favorite_session(project_id, user_id, session_id):
include_fav_viewed=True)
def add_viewed_session(project_id, user_id, session_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.user_viewed_sessions
(user_id, session_id)
VALUES
(%(userId)s,%(sessionId)s)
ON CONFLICT DO NOTHING;""",
{"userId": user_id, "sessionId": session_id})
)
def favorite_session(project_id, user_id, session_id):
if favorite_session_exists(user_id=user_id, session_id=session_id):
return remove_favorite_session(project_id=project_id, user_id=user_id, session_id=session_id)
@@ -50,16 +34,11 @@ def favorite_session(project_id, user_id, session_id):
return add_favorite_session(project_id=project_id, user_id=user_id, session_id=session_id)
def view_session(project_id, user_id, session_id):
return add_viewed_session(project_id=project_id, user_id=user_id, session_id=session_id)
def favorite_session_exists(user_id, session_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT
session_id
"""SELECT session_id
FROM public.user_favorite_sessions
WHERE
user_id = %(userId)s
@@ -68,3 +47,18 @@ def favorite_session_exists(user_id, session_id):
)
r = cur.fetchone()
return r is not None
def get_start_end_timestamp(project_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT max(start_ts) AS max_start_ts, min(start_ts) AS min_start_ts
FROM public.user_favorite_sessions INNER JOIN sessions USING(session_id)
WHERE
user_favorite_sessions.user_id = %(userId)s
AND project_id = %(project_id)s;""",
{"userId": user_id, "project_id": project_id})
)
r = cur.fetchone()
return (0, 0) if r is None else (r["max_start_ts"], r["min_start_ts"])
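A hedged usage sketch (IDs hypothetical): search_sessions calls this helper when data.bookmarked is set, and the (0, 0) fallback collapses the search window when the user has no favorites:

bounds = get_start_end_timestamp(project_id=1, user_id=42)
if bounds == (0, 0):
    print("no favorite sessions for this user in this project")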

View file

@@ -1,206 +1,66 @@
import schemas
from chalicelib.utils import pg_client, helper
from chalicelib.core import autocomplete
from chalicelib.utils.event_filter_definition import SupportedFilter
def get_key_values(project_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
f"""\
SELECT ARRAY_AGG(DISTINCT s.user_os
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_os},
ARRAY_AGG(DISTINCT s.user_browser
ORDER BY s.user_browser)
FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_browser},
ARRAY_AGG(DISTINCT s.user_device
ORDER BY s.user_device)
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {schemas.FilterType.user_device},
ARRAY_AGG(DISTINCT s.user_country
ORDER BY s.user_country)
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {schemas.FilterType.user_country},
ARRAY_AGG(DISTINCT s.user_id
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {schemas.FilterType.user_id},
ARRAY_AGG(DISTINCT s.user_anonymous_id
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {schemas.FilterType.user_anonymous_id},
ARRAY_AGG(DISTINCT s.rev_id
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {schemas.FilterType.rev_id},
ARRAY_AGG(DISTINCT p.referrer
ORDER BY p.referrer)
FILTER ( WHERE p.referrer != '' ) AS {schemas.FilterType.referrer},
ARRAY_AGG(DISTINCT s.utm_source
ORDER BY s.utm_source) FILTER ( WHERE s.utm_source IS NOT NULL AND s.utm_source != 'none' AND s.utm_source != '') AS {schemas.FilterType.utm_source},
ARRAY_AGG(DISTINCT s.utm_medium
ORDER BY s.utm_medium) FILTER ( WHERE s.utm_medium IS NOT NULL AND s.utm_medium != 'none' AND s.utm_medium != '') AS {schemas.FilterType.utm_medium},
ARRAY_AGG(DISTINCT s.utm_campaign
ORDER BY s.utm_campaign) FILTER ( WHERE s.utm_campaign IS NOT NULL AND s.utm_campaign != 'none' AND s.utm_campaign != '') AS {schemas.FilterType.utm_campaign},
ARRAY_AGG(DISTINCT s.user_os
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {schemas.FilterType.user_os_ios},
ARRAY_AGG(DISTINCT s.user_device
ORDER BY s.user_device)
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {schemas.FilterType.user_device_ios},
ARRAY_AGG(DISTINCT s.user_country
ORDER BY s.user_country)
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {schemas.FilterType.user_country_ios},
ARRAY_AGG(DISTINCT s.user_id
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {schemas.FilterType.user_id_ios},
ARRAY_AGG(DISTINCT s.user_anonymous_id
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {schemas.FilterType.user_anonymous_id_ios},
ARRAY_AGG(DISTINCT s.rev_id
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {schemas.FilterType.rev_id_ios}
FROM public.sessions AS s
LEFT JOIN events.pages AS p USING (session_id)
WHERE s.project_id = %(site_id)s;""",
{"site_id": project_id}
)
)
row = cur.fetchone()
for k in row.keys():
if row[k] is None:
row[k] = []
elif len(row[k]) > 500:
row[k] = row[k][:500]
return helper.dict_to_CAPITAL_keys(row)
def get_top_key_values(project_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
f"""\
SELECT {",".join([f"ARRAY((SELECT value FROM public.autocomplete WHERE project_id = %(site_id)s AND type='{k}' GROUP BY value ORDER BY COUNT(*) DESC LIMIT %(limit)s)) AS {k}" for k in SUPPORTED_TYPES.keys()])};""",
{"site_id": project_id, "limit": 5}
)
)
row = cur.fetchone()
return helper.dict_to_CAPITAL_keys(row)
def __generic_query(typename, value_length=None):
if value_length is None or value_length > 2:
return f""" (SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type ='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 5)
UNION
(SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type ='{typename}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5);"""
return f""" SELECT DISTINCT value, type
FROM public.autocomplete
WHERE
project_id = %(project_id)s
AND type ='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 10;"""
def __generic_autocomplete(typename):
def f(project_id, text):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(__generic_query(typename,
value_length=len(text) \
if SUPPORTED_TYPES[typename].change_by_length else None),
{"project_id": project_id, "value": helper.string_to_sql_like(text),
"svalue": helper.string_to_sql_like("^" + text)})
cur.execute(query)
rows = cur.fetchall()
return rows
return f
SUPPORTED_TYPES = {
schemas.FilterType.user_os: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_os),
query=__generic_query(typename=schemas.FilterType.user_os),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_os),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_os)),
schemas.FilterType.user_browser: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_browser),
query=__generic_query(typename=schemas.FilterType.user_browser),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_browser),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_browser)),
schemas.FilterType.user_device: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_device),
query=__generic_query(typename=schemas.FilterType.user_device),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_device),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_device)),
schemas.FilterType.user_country: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_country),
query=__generic_query(typename=schemas.FilterType.user_country),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_country),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_country)),
schemas.FilterType.user_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_id),
query=__generic_query(typename=schemas.FilterType.user_id),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_id),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_id)),
schemas.FilterType.user_anonymous_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id),
query=__generic_query(typename=schemas.FilterType.user_anonymous_id),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_anonymous_id),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_anonymous_id)),
schemas.FilterType.rev_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.rev_id),
query=__generic_query(typename=schemas.FilterType.rev_id),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.rev_id),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.rev_id)),
schemas.FilterType.referrer: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.referrer),
query=__generic_query(typename=schemas.FilterType.referrer),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.referrer),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.referrer)),
schemas.FilterType.utm_campaign: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_campaign),
query=__generic_query(typename=schemas.FilterType.utm_campaign),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_campaign),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_campaign)),
schemas.FilterType.utm_medium: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_medium),
query=__generic_query(typename=schemas.FilterType.utm_medium),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_medium),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_medium)),
schemas.FilterType.utm_source: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_source),
query=__generic_query(typename=schemas.FilterType.utm_source),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_source),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.utm_source)),
# IOS
schemas.FilterType.user_os_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_os_ios),
query=__generic_query(typename=schemas.FilterType.user_os_ios),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_os_ios),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_os_ios)),
schemas.FilterType.user_device_ios: SupportedFilter(
get=__generic_autocomplete(
get=autocomplete.__generic_autocomplete_metas(
typename=schemas.FilterType.user_device_ios),
query=__generic_query(typename=schemas.FilterType.user_device_ios),
change_by_length=True),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_device_ios)),
schemas.FilterType.user_country_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_country_ios),
query=__generic_query(typename=schemas.FilterType.user_country_ios),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_country_ios),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_country_ios)),
schemas.FilterType.user_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_id_ios),
query=__generic_query(typename=schemas.FilterType.user_id_ios),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_id_ios),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_id_ios)),
schemas.FilterType.user_anonymous_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id_ios),
query=__generic_query(typename=schemas.FilterType.user_anonymous_id_ios),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_anonymous_id_ios),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.user_anonymous_id_ios)),
schemas.FilterType.rev_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.rev_id_ios),
query=__generic_query(typename=schemas.FilterType.rev_id_ios),
change_by_length=True),
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.rev_id_ios),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.rev_id_ios)),
}
def search(text, meta_type, project_id):
def search(text: str, meta_type: schemas.FilterType, project_id: int):
rows = []
if meta_type not in list(SUPPORTED_TYPES.keys()):
return {"errors": ["unsupported type"]}

View file

@@ -0,0 +1,11 @@
from chalicelib.utils import pg_client
def view_session(project_id, user_id, session_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO public.user_viewed_sessions(user_id, session_id)
VALUES (%(userId)s,%(sessionId)s)
ON CONFLICT DO NOTHING;""",
{"userId": user_id, "sessionId": session_id})
)
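Thanks to ON CONFLICT DO NOTHING, marking the same session as viewed twice is idempotent; a usage sketch with hypothetical IDs, assuming a unique constraint on (user_id, session_id):

view_session(project_id=1, user_id=42, session_id=1001)
view_session(project_id=1, user_id=42, session_id=1001)  # second call inserts nothing and raises nothing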

View file

@@ -559,8 +559,8 @@ def get_top_insights(filter_d, project_id):
"dropDueToIssues": 0
}]
counts = sessions.search2_pg(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id,
user_id=None, count_only=True)
counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id,
user_id=None, count_only=True)
output[0]["sessionsCount"] = counts["countSessions"]
output[0]["usersCount"] = counts["countUsers"]
return output, 0

View file

@@ -45,7 +45,7 @@ def create_step1(data: schemas.UserSignupSchema):
print("Verifying company's name validity")
company_name = data.organizationName
if company_name is None or len(company_name) < 1 or not helper.is_alphanumeric_space(company_name):
if company_name is None or len(company_name) < 1:
errors.append("invalid organization's name")
print("Verifying project's name validity")

View file

@@ -168,7 +168,7 @@ def update(tenant_id, user_id, changes):
{"user_id": user_id, **changes})
)
return helper.dict_to_camel_case(cur.fetchone())
return get(user_id=user_id, tenant_id=tenant_id)
def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks):
@@ -181,7 +181,7 @@ def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks):
if user:
return {"errors": ["user already exists"]}
name = data.get("name", None)
if name is not None and not helper.is_alphabet_latin_space(name):
if name is not None and len(name) == 0:
return {"errors": ["invalid user name"]}
if name is None:
name = data["email"]

View file

@@ -6,7 +6,6 @@ class Event:
class SupportedFilter:
def __init__(self, get, query, change_by_length):
def __init__(self, get, query):
self.get = get
self.query = query
self.change_by_length = change_by_length

View file

@@ -18,7 +18,7 @@ class JiraManager:
self._config = {"JIRA_PROJECT_ID": project_id, "JIRA_URL": url, "JIRA_USERNAME": username,
"JIRA_PASSWORD": password}
try:
self._jira = JIRA(url, basic_auth=(username, password), logging=True, max_retries=1)
self._jira = JIRA(url, basic_auth=(username, password), logging=True, max_retries=0, timeout=3)
except Exception as e:
print("!!! JIRA AUTH ERROR")
print(e)

View file

@@ -47,4 +47,5 @@ sessions_region=us-east-1
sourcemaps_bucket=sourcemaps
sourcemaps_reader=http://127.0.0.1:9000/sourcemaps
stage=default-foss
version_number=1.4.0
version_number=1.4.0
FS_DIR=/mnt/efs

View file

@@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.10
boto3==1.24.26
boto3==1.24.53
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.3.1
jira==3.3.0
elasticsearch==8.3.3
jira==3.3.1
fastapi==0.78.0
fastapi==0.80.0
uvicorn[standard]==0.18.2
python-decouple==3.6
pydantic[email]==1.9.1
pydantic[email]==1.9.2
apscheduler==3.9.1

View file

@@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.10
boto3==1.24.26
boto3==1.24.53
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.3.1
jira==3.3.0
elasticsearch==8.3.3
jira==3.3.1
fastapi==0.78.0
fastapi==0.80.0
uvicorn[standard]==0.18.2
python-decouple==3.6
pydantic[email]==1.9.1
pydantic[email]==1.9.2
apscheduler==3.9.1

View file

@@ -1,18 +1,19 @@
from typing import Union, Optional
from typing import Union
from decouple import config
from fastapi import Depends, Body, BackgroundTasks, HTTPException
from fastapi.responses import FileResponse
from starlette import status
import schemas
from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \
sessions_metas, alerts, funnels, issues, integrations_manager, metadata, \
alerts, funnels, issues, integrations_manager, metadata, \
log_tool_elasticsearch, log_tool_datadog, \
log_tool_stackdriver, reset_password, sessions_favorite_viewed, \
log_tool_stackdriver, reset_password, sessions_favorite, \
log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \
log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \
assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \
custom_metrics, saved_search
assist, heatmaps, mobile, signup, tenants, errors_viewed, boarding, notifications, webhook, users, \
custom_metrics, saved_search, integrations_global, sessions_viewed, errors_favorite
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import email_helper, helper, captcha
from chalicelib.utils.TimeUTC import TimeUTC
@@ -50,6 +51,14 @@ def login(data: schemas.UserLoginSchema = Body(...)):
}
@app.post('/{projectId}/sessions/search', tags=["sessions"])
@app.post('/{projectId}/sessions/search2', tags=["sessions"])
def sessions_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id)
return {'data': data}
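A hedged client-side sketch of the merged search endpoint; the host, token, and payload fields are hypothetical and only loosely mirror FlatSessionsSearchPayloadSchema:

import requests  # requests==2.28.1 is already pinned in requirements.txt

resp = requests.post("https://openreplay.example.com/api/1/sessions/search",
                     headers={"Authorization": "Bearer <JWT>"},
                     json={"events": [], "filters": [], "bookmarked": False})
print(resp.json().get("data"))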
@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
@@ -61,7 +70,7 @@ def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: B
if data is None:
return {"errors": ["session not found"]}
if data.get("inDB"):
background_tasks.add_task(sessions_favorite_viewed.view_session, project_id=projectId, user_id=context.user_id,
background_tasks.add_task(sessions_viewed.view_session, project_id=projectId, user_id=context.user_id,
session_id=sessionId)
return {
'data': data
@@ -73,8 +82,8 @@ def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: B
def add_remove_favorite_session2(projectId: int, sessionId: int,
context: schemas.CurrentContext = Depends(OR_context)):
return {
"data": sessions_favorite_viewed.favorite_session(project_id=projectId, user_id=context.user_id,
session_id=sessionId)}
"data": sessions_favorite.favorite_session(project_id=projectId, user_id=context.user_id,
session_id=sessionId)}
@app.get('/{projectId}/sessions/{sessionId}/assign', tags=["sessions"])
@@ -163,21 +172,12 @@ def events_search(projectId: int, q: str,
return result
@app.post('/{projectId}/sessions/search2', tags=["sessions"])
def sessions_search2(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = sessions.search2_pg(data=data, project_id=projectId, user_id=context.user_id)
return {'data': data}
@app.get('/{projectId}/sessions/filters', tags=["sessions"])
def session_filter_values(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {'data': sessions_metas.get_key_values(projectId)}
@app.get('/{projectId}/sessions/filters/top', tags=["sessions"])
def session_top_filter_values(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {'data': sessions_metas.get_top_key_values(projectId)}
@app.get('/{projectId}/integrations', tags=["integrations"])
def get_integrations_status(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
data = integrations_global.get_global_integrations_status(tenant_id=context.tenant_id,
user_id=context.user_id,
project_id=projectId)
return {"data": data}
@app.post('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}', tags=["integrations"])
@@ -432,29 +432,49 @@ def get_integration_status(context: schemas.CurrentContext = Depends(OR_context)
return {"data": integration.get_obfuscated()}
@app.get('/integrations/jira', tags=["integrations"])
def get_integration_status_jira(context: schemas.CurrentContext = Depends(OR_context)):
error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id,
user_id=context.user_id,
tool=integration_jira_cloud.PROVIDER)
if error is not None and integration is None:
return error
return {"data": integration.get_obfuscated()}
@app.get('/integrations/github', tags=["integrations"])
def get_integration_status_github(context: schemas.CurrentContext = Depends(OR_context)):
error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id,
user_id=context.user_id,
tool=integration_github.PROVIDER)
if error is not None and integration is None:
return error
return {"data": integration.get_obfuscated()}
@app.post('/integrations/jira', tags=["integrations"])
@app.put('/integrations/jira', tags=["integrations"])
def add_edit_jira_cloud(data: schemas.JiraGithubSchema = Body(...),
def add_edit_jira_cloud(data: schemas.JiraSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
if not data.url.endswith('atlassian.net'):
return {"errors": ["url must be a valid JIRA URL (example.atlassian.net)"]}
error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,
tenant_id=context.tenant_id,
user_id=context.user_id)
if error is not None and integration is None:
return error
data.provider = integration_jira_cloud.PROVIDER
return {"data": integration.add_edit(data=data.dict())}
@app.post('/integrations/github', tags=["integrations"])
@app.put('/integrations/github', tags=["integrations"])
def add_edit_github(data: schemas.JiraGithubSchema = Body(...),
def add_edit_github(data: schemas.GithubSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
tenant_id=context.tenant_id,
user_id=context.user_id)
if error is not None:
return error
data.provider = integration_github.PROVIDER
return {"data": integration.add_edit(data=data.dict())}
@@ -471,7 +491,8 @@ def delete_default_issue_tracking_tool(context: schemas.CurrentContext = Depends
def delete_jira_cloud(context: schemas.CurrentContext = Depends(OR_context)):
error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,
tenant_id=context.tenant_id,
user_id=context.user_id)
user_id=context.user_id,
for_delete=True)
if error is not None:
return error
return {"data": integration.delete()}
@@ -481,7 +502,8 @@ def delete_jira_cloud(context: schemas.CurrentContext = Depends(OR_context)):
def delete_github(context: schemas.CurrentContext = Depends(OR_context)):
error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
tenant_id=context.tenant_id,
user_id=context.user_id)
user_id=context.user_id,
for_delete=True)
if error is not None:
return error
return {"data": integration.delete()}
@@ -882,11 +904,22 @@ def get_live_session(projectId: int, sessionId: str, background_tasks: Backgroun
if data is None:
return {"errors": ["session not found"]}
if data.get("inDB"):
background_tasks.add_task(sessions_favorite_viewed.view_session, project_id=projectId,
background_tasks.add_task(sessions_viewed.view_session, project_id=projectId,
user_id=context.user_id, session_id=sessionId)
return {'data': data}
@app.get('/{projectId}/unprocessed/{sessionId}', tags=["assist"])
@app.get('/{projectId}/assist/sessions/{sessionId}/replay', tags=["assist"])
def get_live_session_replay_file(projectId: int, sessionId: str,
context: schemas.CurrentContext = Depends(OR_context)):
path = assist.get_raw_mob_by_id(project_id=projectId, session_id=sessionId)
if path is None:
return {"errors": ["Replay file not found"]}
return FileResponse(path=path, media_type="application/octet-stream")
@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
@@ -957,7 +990,7 @@ def errors_get_details(projectId: int, errorId: str, background_tasks: Backgroun
data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
**{"density24": density24, "density30": density30})
if data.get("data") is not None:
background_tasks.add_task(errors_favorite_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
error_id=errorId)
return data
@@ -986,7 +1019,7 @@ def errors_get_details_sourcemaps(projectId: int, errorId: str,
def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
endDate: int = TimeUTC.now(), context: schemas.CurrentContext = Depends(OR_context)):
if action == "favorite":
return errors_favorite_viewed.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
elif action == "sessions":
start_date = startDate
end_date = endDate

View file

@@ -7,7 +7,7 @@ from starlette.responses import RedirectResponse
import schemas
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import tenants, users, metadata, projects, license
from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import helper
@@ -95,18 +95,6 @@ def edit_slack_integration(integrationId: int, data: schemas.EditSlackSchema = B
changes={"name": data.name, "endpoint": data.url})}
# this endpoint supports both jira & github based on the `provider` attribute
@app.post('/integrations/issues', tags=["integrations"])
def add_edit_jira_cloud_github(data: schemas.JiraGithubSchema,
context: schemas.CurrentContext = Depends(OR_context)):
provider = data.provider.upper()
error, integration = integrations_manager.get_integration(tool=provider, tenant_id=context.tenant_id,
user_id=context.user_id)
if error is not None:
return error
return {"data": integration.add_edit(data=data.dict())}
@app.post('/client/members', tags=["client"])
@app.put('/client/members', tags=["client"])
def add_member(background_tasks: BackgroundTasks, data: schemas.CreateMemberSchema = Body(...),

View file

@@ -100,15 +100,17 @@ class NotificationsViewSchema(BaseModel):
endTimestamp: Optional[int] = Field(default=None)
class JiraGithubSchema(BaseModel):
provider: str = Field(...)
username: str = Field(...)
class GithubSchema(BaseModel):
token: str = Field(...)
class JiraSchema(GithubSchema):
username: str = Field(...)
url: HttpUrl = Field(...)
@validator('url')
def transform_url(cls, v: HttpUrl):
return HttpUrl.build(scheme=v.scheme, host=v.host)
return HttpUrl.build(scheme=v.scheme.lower(), host=v.host.lower())
class CreateEditWebhookSchema(BaseModel):
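The lowercasing added to transform_url normalizes scheme and host and drops any path or query; a minimal standalone sketch, assuming pydantic 1.9 (HttpUrl.build is inherited from AnyUrl) and a hypothetical stand-in model:

from pydantic import BaseModel, Field, HttpUrl, validator

class _UrlOnly(BaseModel):  # hypothetical stand-in for JiraSchema
    url: HttpUrl = Field(...)

    @validator('url')
    def transform_url(cls, v: HttpUrl):
        return HttpUrl.build(scheme=v.scheme.lower(), host=v.host.lower())

print(_UrlOnly(url="https://Example.ATLASSIAN.NET/browse/OR-1").url)
# -> https://example.atlassian.net  (case folded, path dropped)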
@@ -277,7 +279,7 @@ class _AlertMessageSchema(BaseModel):
value: str = Field(...)
class AlertDetectionChangeType(str, Enum):
class AlertDetectionType(str, Enum):
percent = "percent"
change = "change"
@@ -288,7 +290,6 @@ class _AlertOptionSchema(BaseModel):
previousPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(15)
lastNotification: Optional[int] = Field(None)
renotifyInterval: Optional[int] = Field(720)
change: Optional[AlertDetectionChangeType] = Field(None)
class AlertColumn(str, Enum):
@@ -337,6 +338,7 @@ class AlertDetectionMethod(str, Enum):
class AlertSchema(BaseModel):
name: str = Field(...)
detection_method: AlertDetectionMethod = Field(...)
change: Optional[AlertDetectionType] = Field(default=AlertDetectionType.change)
description: Optional[str] = Field(None)
options: _AlertOptionSchema = Field(...)
query: _AlertQuerySchema = Field(...)
@@ -354,11 +356,6 @@ class AlertSchema(BaseModel):
def alert_validator(cls, values):
if values.get("query") is not None and values["query"].left == AlertColumn.custom:
assert values.get("series_id") is not None, "series_id should not be null for CUSTOM alert"
if values.get("detectionMethod") is not None \
and values["detectionMethod"] == AlertDetectionMethod.change \
and values.get("options") is not None:
assert values["options"].change is not None, \
"options.change should not be null for detection method 'change'"
return values
class Config:
@@ -552,13 +549,15 @@ class _SessionSearchEventRaw(__MixedSearchFilter):
assert values.get("sourceOperator") is not None, \
"sourceOperator should not be null for PerformanceEventType"
if values["type"] == PerformanceEventType.time_between_events:
assert values["sourceOperator"] != MathOperator._equal.value, \
f"{MathOperator._equal} is not allowed for duration of {PerformanceEventType.time_between_events}"
assert len(values.get("value", [])) == 2, \
f"must provide 2 Events as value for {PerformanceEventType.time_between_events}"
assert isinstance(values["value"][0], _SessionSearchEventRaw) \
and isinstance(values["value"][1], _SessionSearchEventRaw), \
f"event should be of type _SessionSearchEventRaw for {PerformanceEventType.time_between_events}"
assert len(values["source"]) > 0 and isinstance(values["source"][0], int), \
f"source of type int if required for {PerformanceEventType.time_between_events}"
f"source of type int is required for {PerformanceEventType.time_between_events}"
else:
assert "source" in values, f"source is required for {values.get('type')}"
assert isinstance(values["source"], list), f"source of type list is required for {values.get('type')}"
@ -734,7 +733,7 @@ class ErrorSort(str, Enum):
sessions_count = 'sessions'
class SearchErrorsSchema(SessionsSearchPayloadSchema):
class SearchErrorsSchema(FlatSessionsSearchPayloadSchema):
sort: ErrorSort = Field(default=ErrorSort.occurrence)
density: Optional[int] = Field(7)
status: Optional[ErrorStatus] = Field(default=ErrorStatus.all)
@ -766,7 +765,7 @@ class MobileSignPayloadSchema(BaseModel):
keys: List[str] = Field(...)
class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema, SearchErrorsSchema):
class CustomMetricSeriesFilterSchema(SearchErrorsSchema):
startDate: Optional[int] = Field(None)
endDate: Optional[int] = Field(None)
sort: Optional[str] = Field(None)
@ -1026,7 +1025,7 @@ class LiveFilterType(str, Enum):
user_UUID = "USERUUID"
tracker_version = "TRACKERVERSION"
user_browser_version = "USERBROWSERVERSION"
user_device_type = "USERDEVICETYPE",
user_device_type = "USERDEVICETYPE"
class LiveSessionSearchFilterSchema(BaseModel):
@ -1070,3 +1069,18 @@ class LiveSessionsSearchPayloadSchema(_PaginatedSchema):
class Config:
alias_generator = attribute_to_camel_case
class IntegrationType(str, Enum):
github = "GITHUB"
jira = "JIRA"
slack = "SLACK"
sentry = "SENTRY"
bugsnag = "BUGSNAG"
rollbar = "ROLLBAR"
elasticsearch = "ELASTICSEARCH"
datadog = "DATADOG"
sumologic = "SUMOLOGIC"
stackdriver = "STACKDRIVER"
cloudwatch = "CLOUDWATCH"
newrelic = "NEWRELIC"

View file

@ -19,7 +19,6 @@ RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openrep
FROM alpine AS entrypoint
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache ca-certificates
RUN adduser -u 1001 openreplay -D
@ -51,14 +50,14 @@ ENV TZ=UTC \
ASSETS_SIZE_LIMIT=6291456 \
ASSETS_HEADERS="{ \"Cookie\": \"ABv=3;\" }" \
FS_CLEAN_HRS=72 \
FILE_SPLIT_SIZE=500000 \
FILE_SPLIT_SIZE=1000000 \
LOG_QUEUE_STATS_INTERVAL_SEC=60 \
DB_BATCH_QUEUE_LIMIT=20 \
DB_BATCH_SIZE_LIMIT=10000000 \
PARTITIONS_NUMBER=16 \
QUEUE_MESSAGE_SIZE_LIMIT=1048576 \
BEACON_SIZE_LIMIT=1000000 \
USE_FAILOVER=false \
USE_FAILOVER=true \
GROUP_STORAGE_FAILOVER=failover \
TOPIC_STORAGE_FAILOVER=storage-failover

View file

@ -29,6 +29,8 @@ function build_service() {
}
function build_api(){
cp -R ../backend ../_backend
cd ../_backend
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -r ../ee/backend/* ./
@ -43,6 +45,8 @@ function build_api(){
build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
done
cd ../backend
rm -rf ../_backend
echo "backend build completed"
}

1
backend/cmd/assets/file Normal file
View file

@ -0,0 +1 @@
GROUP_CACHE=from_file

View file

@ -3,7 +3,7 @@ package main
import (
"context"
"log"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"syscall"
@ -13,8 +13,8 @@ import (
"openreplay/backend/internal/assets/cacher"
config "openreplay/backend/internal/config/assets"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
func main() {
@ -34,22 +34,25 @@ func main() {
consumer := queue.NewMessageConsumer(
cfg.GroupCache,
[]string{cfg.TopicCache},
func(sessionID uint64, message messages.Message, e *types.Meta) {
switch msg := message.(type) {
case *messages.AssetCache:
cacher.CacheURL(sessionID, msg.URL)
totalAssets.Add(context.Background(), 1)
case *messages.ErrorEvent:
if msg.Source != "js_exception" {
return
}
sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
if err != nil {
log.Printf("Error on source extraction: %v", err)
return
}
for _, source := range sourceList {
cacher.CacheJSFile(source)
func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
for iter.Next() {
if iter.Type() == messages.MsgAssetCache {
msg := iter.Message().Decode().(*messages.AssetCache)
cacher.CacheURL(sessionID, msg.URL)
totalAssets.Add(context.Background(), 1)
} else if iter.Type() == messages.MsgErrorEvent {
msg := iter.Message().Decode().(*messages.ErrorEvent)
if msg.Source != "js_exception" {
continue
}
sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
if err != nil {
log.Printf("Error on source extraction: %v", err)
continue
}
for _, source := range sourceList {
cacher.CacheJSFile(source)
}
}
}
},
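
The handler contract changes here from one decoded message per callback to an iterator over a whole batch. A minimal sketch of a handler under the new contract, assuming only the Iterator and Message interfaces introduced in this release (the handler name and body are illustrative):

package example

import (
	"log"

	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/queue/types"
)

func assetHandler(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
	for iter.Next() {
		// Type() only inspects the message header, so uninteresting
		// messages can be skipped without decoding their payloads.
		if iter.Type() != messages.MsgAssetCache {
			continue
		}
		// Decode() parses the payload lazily, only for kept messages.
		msg := iter.Message().Decode().(*messages.AssetCache)
		log.Printf("caching asset %s for session %d", msg.URL, sessionID)
	}
}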

View file

@ -3,24 +3,23 @@ package main
import (
"errors"
"log"
"openreplay/backend/internal/config/db"
"openreplay/backend/internal/db/datasaver"
"openreplay/backend/pkg/handlers"
custom2 "openreplay/backend/pkg/handlers/custom"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/sessions"
"time"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/internal/config/db"
"openreplay/backend/internal/db/datasaver"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/handlers"
custom2 "openreplay/backend/pkg/handlers/custom"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/sessions"
)
func main() {
@ -46,54 +45,70 @@ func main() {
// Create handler's aggregator
builderMap := sessions.NewBuilderMap(handlersFabric)
keepMessage := func(tp int) bool {
return tp == messages.MsgMetadata || tp == messages.MsgIssueEvent || tp == messages.MsgSessionStart ||
tp == messages.MsgSessionEnd || tp == messages.MsgUserID || tp == messages.MsgUserAnonymousID ||
tp == messages.MsgCustomEvent || tp == messages.MsgClickEvent || tp == messages.MsgInputEvent ||
tp == messages.MsgPageEvent || tp == messages.MsgErrorEvent || tp == messages.MsgFetchEvent ||
tp == messages.MsgGraphQLEvent || tp == messages.MsgIntegrationEvent || tp == messages.MsgPerformanceTrackAggr ||
tp == messages.MsgResourceEvent || tp == messages.MsgLongTask || tp == messages.MsgJSException ||
tp == messages.MsgResourceTiming || tp == messages.MsgRawCustomEvent || tp == messages.MsgCustomIssue ||
tp == messages.MsgFetch || tp == messages.MsgGraphQL || tp == messages.MsgStateAction ||
tp == messages.MsgSetInputTarget || tp == messages.MsgSetInputValue || tp == messages.MsgCreateDocument ||
tp == messages.MsgMouseClick || tp == messages.MsgSetPageLocation || tp == messages.MsgPageLoadTiming ||
tp == messages.MsgPageRenderTiming
}
var producer types.Producer = nil
if cfg.UseQuickwit {
producer = queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(15000)
}
// Init modules
saver := datasaver.New(pg)
saver := datasaver.New(pg, producer)
saver.InitStats()
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
// Handler logic
handler := func(sessionID uint64, msg messages.Message, meta *types.Meta) {
handler := func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
// Just save session data into db without additional checks
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
for iter.Next() {
if !keepMessage(iter.Type()) {
continue
}
return
}
msg := iter.Message().Decode()
session, err := pg.GetSession(sessionID)
if session == nil {
if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
// Save statistics to db
err = saver.InsertStats(session, msg)
if err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
}
// Handle heuristics and save to temporary queue in memory
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
// Process saved heuristics messages as usual messages above in the code
builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
// TODO: DRY code (carefully with the return statement logic)
// Just save session data into db without additional checks
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
if err := saver.InsertStats(session, msg); err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
session, err := pg.GetSession(sessionID)
if session == nil {
if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
})
// Save statistics to db
err = saver.InsertStats(session, msg)
if err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
}
// Handle heuristics and save to temporary queue in memory
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
// Process saved heuristics messages as usual messages above in the code
builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
return
}
if err := saver.InsertStats(session, msg); err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
})
}
}
// Init consumer

View file

@ -0,0 +1,92 @@
chalice:
env:
jwt_secret: SetARandomStringHere
clickhouse:
enabled: false
fromVersion: v1.6.0
global:
domainName: openreplay.local
email:
emailFrom: OpenReplay<do-not-reply@openreplay.com>
emailHost: ""
emailPassword: ""
emailPort: "587"
emailSslCert: ""
emailSslKey: ""
emailUseSsl: "false"
emailUseTls: "true"
emailUser: ""
enterpriseEditionLicense: ""
ingress:
controller:
config:
enable-real-ip: true
force-ssl-redirect: false
max-worker-connections: 0
proxy-body-size: 10m
ssl-redirect: false
extraArgs:
default-ssl-certificate: app/openreplay-ssl
ingressClass: openreplay
ingressClassResource:
name: openreplay
service:
externalTrafficPolicy: Local
kafka:
kafkaHost: kafka.db.svc.cluster.local
kafkaPort: "9092"
kafkaUseSsl: "false"
zookeeperHost: databases-zookeeper.svc.cluster.local
zookeeperNonTLSPort: 2181
postgresql:
postgresqlDatabase: postgres
postgresqlHost: postgresql.db.svc.cluster.local
postgresqlPassword: changeMePassword
postgresqlPort: "5432"
postgresqlUser: postgres
redis:
redisHost: redis-master.db.svc.cluster.local
redisPort: "6379"
s3:
accessKey: changeMeMinioAccessKey
assetsBucket: sessions-assets
endpoint: http://minio.db.svc.cluster.local:9000
recordingsBucket: mobs
region: us-east-1
secretKey: changeMeMinioPassword
sourcemapsBucket: sourcemaps
ingress-nginx:
controller:
config:
enable-real-ip: true
force-ssl-redirect: false
max-worker-connections: 0
proxy-body-size: 10m
ssl-redirect: false
extraArgs:
default-ssl-certificate: app/openreplay-ssl
ingressClass: openreplay
ingressClassResource:
name: openreplay
service:
externalTrafficPolicy: Local
kafka:
kafkaHost: kafka.db.svc.cluster.local
kafkaPort: "9092"
kafkaUseSsl: "false"
zookeeperHost: databases-zookeeper.svc.cluster.local
zookeeperNonTLSPort: 2181
minio:
global:
minio:
accessKey: changeMeMinioAccessKey
secretKey: changeMeMinioPassword
postgresql:
postgresqlDatabase: postgres
postgresqlHost: postgresql.db.svc.cluster.local
postgresqlPassword: changeMePassword
postgresqlPort: "5432"
postgresqlUser: postgres
redis:
redisHost: redis-master.db.svc.cluster.local
redisPort: "6379"

View file

@ -2,25 +2,23 @@ package main
import (
"log"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/internal/config/ender"
"openreplay/backend/internal/sessionender"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/monitoring"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/intervals"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
//
func main() {
metrics := monitoring.New("ender")
@ -45,18 +43,17 @@ func main() {
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
switch msg.(type) {
case *messages.SessionStart, *messages.SessionEnd:
// Skip several message types
return
func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
for iter.Next() {
if iter.Type() == messages.MsgSessionStart || iter.Type() == messages.MsgSessionEnd {
continue
}
if iter.Message().Meta().Timestamp == 0 {
log.Printf("ZERO TS, sessID: %d, msgType: %d", sessionID, iter.Type())
}
statsLogger.Collect(sessionID, meta)
sessions.UpdateSession(sessionID, meta.Timestamp, iter.Message().Meta().Timestamp)
}
// Test debug
if msg.Meta().Timestamp == 0 {
log.Printf("ZERO TS, sessID: %d, msgType: %d", sessionID, msg.TypeID())
}
statsLogger.Collect(sessionID, meta)
sessions.UpdateSession(sessionID, meta.Timestamp, msg.Meta().Timestamp)
},
false,
cfg.MessageSizeLimit,

View file

@ -2,6 +2,12 @@ package main
import (
"log"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/internal/config/heuristics"
"openreplay/backend/pkg/handlers"
web2 "openreplay/backend/pkg/handlers/web"
@ -9,12 +15,7 @@ import (
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/sessions"
"os"
"os/signal"
"syscall"
"time"
)
func main() {
@ -33,10 +34,6 @@ func main() {
&web2.MemoryIssueDetector{},
&web2.NetworkIssueDetector{},
&web2.PerformanceAggregator{},
// iOS's handlers
//&ios2.AppNotResponding{},
//&ios2.ClickRageDetector{},
//&ios2.PerformanceAggregator{},
// Other handlers (you can add your custom handlers here)
//&custom.CustomHandler{},
}
@ -55,9 +52,11 @@ func main() {
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
for iter.Next() {
statsLogger.Collect(sessionID, meta)
builderMap.HandleMessage(sessionID, iter.Message().Decode(), iter.Message().Meta().Index)
}
},
false,
cfg.MessageSizeLimit,

View file

@ -2,22 +2,20 @@ package main
import (
"context"
"encoding/binary"
"log"
"openreplay/backend/internal/sink/assetscache"
"openreplay/backend/internal/sink/oswriter"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/monitoring"
"time"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/internal/config/sink"
"openreplay/backend/internal/sink/assetscache"
"openreplay/backend/internal/sink/oswriter"
"openreplay/backend/internal/storage"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)
@ -58,51 +56,53 @@ func main() {
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, message Message, _ *types.Meta) {
// Process assets
message = assetMessageHandler.ParseAssets(sessionID, message)
func(sessionID uint64, iter Iterator, meta *types.Meta) {
for iter.Next() {
// [METRICS] Increase the number of processed messages
totalMessages.Add(context.Background(), 1)
totalMessages.Add(context.Background(), 1)
// Filter message
typeID := message.TypeID()
// Send SessionEnd trigger to storage service
switch message.(type) {
case *SessionEnd:
if err := producer.Produce(cfg.TopicTrigger, sessionID, Encode(message)); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
// Send SessionEnd trigger to storage service
if iter.Type() == MsgSessionEnd {
if err := producer.Produce(cfg.TopicTrigger, sessionID, iter.Message().Encode()); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
}
continue
}
return
}
if !IsReplayerType(typeID) {
return
}
// If message timestamp is empty, use at least ts of session start
ts := message.Meta().Timestamp
if ts == 0 {
log.Printf("zero ts; sessID: %d, msg: %+v", sessionID, message)
} else {
// Log ts of last processed message
counter.Update(sessionID, time.UnixMilli(ts))
}
msg := iter.Message()
// Process assets
if iter.Type() == MsgSetNodeAttributeURLBased ||
iter.Type() == MsgSetCSSDataURLBased ||
iter.Type() == MsgCSSInsertRuleURLBased ||
iter.Type() == MsgAdoptedSSReplaceURLBased ||
iter.Type() == MsgAdoptedSSInsertRuleURLBased {
msg = assetMessageHandler.ParseAssets(sessionID, msg.Decode()) // TODO: filter type only once (use iterator inside or bring ParseAssets out here).
}
value := message.Encode()
var data []byte
if IsIOSType(typeID) {
data = value
} else {
data = make([]byte, len(value)+8)
copy(data[8:], value[:])
binary.LittleEndian.PutUint64(data[0:], message.Meta().Index)
}
if err := writer.Write(sessionID, data); err != nil {
log.Printf("Writer error: %v\n", err)
}
// Filter message
if !IsReplayerType(msg.TypeID()) {
continue
}
messageSize.Record(context.Background(), float64(len(data)))
savedMessages.Add(context.Background(), 1)
// If message timestamp is empty, use at least ts of session start
ts := msg.Meta().Timestamp
if ts == 0 {
log.Printf("zero ts; sessID: %d, msgType: %d", sessionID, iter.Type())
} else {
// Log ts of last processed message
counter.Update(sessionID, time.UnixMilli(ts))
}
// Write encoded message with index to session file
data := msg.EncodeWithIndex()
if err := writer.Write(sessionID, data); err != nil {
log.Printf("Writer error: %v\n", err)
}
// [METRICS] Increase the number of written to the files messages and the message size
messageSize.Record(context.Background(), float64(len(data)))
savedMessages.Add(context.Background(), 1)
}
},
false,
cfg.MessageSizeLimit,

View file

@ -2,8 +2,7 @@ package main
import (
"log"
"openreplay/backend/pkg/failover"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue/types"
"os"
"os/signal"
"strconv"
@ -12,9 +11,10 @@ import (
config "openreplay/backend/internal/config/storage"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/failover"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
s3storage "openreplay/backend/pkg/storage"
)
@ -43,14 +43,17 @@ func main() {
[]string{
cfg.TopicTrigger,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
switch m := msg.(type) {
case *messages.SessionEnd:
if err := srv.UploadKey(strconv.FormatUint(sessionID, 10), 5); err != nil {
sessionFinder.Find(sessionID, m.Timestamp)
func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
for iter.Next() {
if iter.Type() == messages.MsgSessionEnd {
msg := iter.Message().Decode().(*messages.SessionEnd)
if err := srv.UploadKey(strconv.FormatUint(sessionID, 10), 5); err != nil {
log.Printf("can't find session: %d", sessionID)
sessionFinder.Find(sessionID, msg.Timestamp)
}
// Log timestamp of last processed session
counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
}
// Log timestamp of last processed session
counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
}
},
true,

View file

@ -15,6 +15,7 @@ require (
github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451
github.com/jackc/pgx/v4 v4.6.0
github.com/klauspost/pgzip v1.2.5
github.com/lib/pq v1.2.0
github.com/oschwald/maxminddb-golang v1.7.0
github.com/pkg/errors v0.9.1
github.com/sethvargo/go-envconfig v0.7.0

View file

@ -73,7 +73,8 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlCo
return
}
c.timeoutMap.add(cachePath)
if c.s3.Exists(cachePath) {
crTime := c.s3.GetCreationTime(cachePath)
if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) { // recently uploaded
return
}

View file

@ -17,6 +17,7 @@ type Config struct {
CommitBatchTimeout time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=15s"`
BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"`
BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"`
UseQuickwit bool `env:"QUICKWIT_ENABLED,default=false"`
}
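
These Config structs are populated from environment variables through sethvargo/go-envconfig (pinned in the go.mod hunk earlier in this diff), so the new flag is wired up by the struct tag alone. A minimal, self-contained sketch of that mechanism, assuming the v0.7 API:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/sethvargo/go-envconfig"
)

type Config struct {
	UseQuickwit bool `env:"QUICKWIT_ENABLED,default=false"`
}

func main() {
	os.Setenv("QUICKWIT_ENABLED", "true") // illustrative only
	var cfg Config
	if err := envconfig.Process(context.Background(), &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.UseQuickwit) // true
}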
func New() *Config {

View file

@ -0,0 +1,123 @@
package datasaver
import (
"encoding/json"
"log"
"openreplay/backend/pkg/messages"
)
type FetchEventFTS struct {
Method string `json:"method"`
URL string `json:"url"`
Request string `json:"request"`
Response string `json:"response"`
Status uint64 `json:"status"`
Timestamp uint64 `json:"timestamp"`
Duration uint64 `json:"duration"`
}
type PageEventFTS struct {
MessageID uint64 `json:"message_id"`
Timestamp uint64 `json:"timestamp"`
URL string `json:"url"`
Referrer string `json:"referrer"`
Loaded bool `json:"loaded"`
RequestStart uint64 `json:"request_start"`
ResponseStart uint64 `json:"response_start"`
ResponseEnd uint64 `json:"response_end"`
DomContentLoadedEventStart uint64 `json:"dom_content_loaded_event_start"`
DomContentLoadedEventEnd uint64 `json:"dom_content_loaded_event_end"`
LoadEventStart uint64 `json:"load_event_start"`
LoadEventEnd uint64 `json:"load_event_end"`
FirstPaint uint64 `json:"first_paint"`
FirstContentfulPaint uint64 `json:"first_contentful_paint"`
SpeedIndex uint64 `json:"speed_index"`
VisuallyComplete uint64 `json:"visually_complete"`
TimeToInteractive uint64 `json:"time_to_interactive"`
}
type GraphQLEventFTS struct {
OperationKind string `json:"operation_kind"`
OperationName string `json:"operation_name"`
Variables string `json:"variables"`
Response string `json:"response"`
}
func (s *Saver) sendToFTS(msg messages.Message, sessionID uint64) {
// Skip, if FTS is disabled
if s.producer == nil {
return
}
var (
event []byte
err error
)
switch m := msg.(type) {
// Common
case *messages.Fetch:
event, err = json.Marshal(FetchEventFTS{
Method: m.Method,
URL: m.URL,
Request: m.Request,
Response: m.Response,
Status: m.Status,
Timestamp: m.Timestamp,
Duration: m.Duration,
})
case *messages.FetchEvent:
event, err = json.Marshal(FetchEventFTS{
Method: m.Method,
URL: m.URL,
Request: m.Request,
Response: m.Response,
Status: m.Status,
Timestamp: m.Timestamp,
Duration: m.Duration,
})
case *messages.PageEvent:
event, err = json.Marshal(PageEventFTS{
MessageID: m.MessageID,
Timestamp: m.Timestamp,
URL: m.URL,
Referrer: m.Referrer,
Loaded: m.Loaded,
RequestStart: m.RequestStart,
ResponseStart: m.ResponseStart,
ResponseEnd: m.ResponseEnd,
DomContentLoadedEventStart: m.DomContentLoadedEventStart,
DomContentLoadedEventEnd: m.DomContentLoadedEventEnd,
LoadEventStart: m.LoadEventStart,
LoadEventEnd: m.LoadEventEnd,
FirstPaint: m.FirstPaint,
FirstContentfulPaint: m.FirstContentfulPaint,
SpeedIndex: m.SpeedIndex,
VisuallyComplete: m.VisuallyComplete,
TimeToInteractive: m.TimeToInteractive,
})
case *messages.GraphQL:
event, err = json.Marshal(GraphQLEventFTS{
OperationKind: m.OperationKind,
OperationName: m.OperationName,
Variables: m.Variables,
Response: m.Response,
})
case *messages.GraphQLEvent:
event, err = json.Marshal(GraphQLEventFTS{
OperationKind: m.OperationKind,
OperationName: m.OperationName,
Variables: m.Variables,
Response: m.Response,
})
}
if err != nil {
log.Printf("can't marshal json for quickwit: %s", err)
} else {
if len(event) > 0 {
if err := s.producer.Produce("quickwit", sessionID, event); err != nil {
log.Printf("can't send event to quickwit: %s", err)
}
}
}
}
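
To make the payload concrete: a self-contained sketch of the JSON document this function emits to the quickwit topic for a fetch event, with placeholder field values (the struct mirrors FetchEventFTS above):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Mirror of FetchEventFTS defined above.
type FetchEventFTS struct {
	Method    string `json:"method"`
	URL       string `json:"url"`
	Request   string `json:"request"`
	Response  string `json:"response"`
	Status    uint64 `json:"status"`
	Timestamp uint64 `json:"timestamp"`
	Duration  uint64 `json:"duration"`
}

func main() {
	event, err := json.Marshal(FetchEventFTS{
		Method:    "GET",
		URL:       "https://example.com/api/items", // placeholder
		Status:    200,
		Timestamp: 1662000000000,
		Duration:  42,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(event))
}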

View file

@ -35,12 +35,15 @@ func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
// Unique Web messages
case *PageEvent:
mi.sendToFTS(msg, sessionID)
return mi.pg.InsertWebPageEvent(sessionID, m)
case *ErrorEvent:
return mi.pg.InsertWebErrorEvent(sessionID, m)
case *FetchEvent:
mi.sendToFTS(msg, sessionID)
return mi.pg.InsertWebFetchEvent(sessionID, m)
case *GraphQLEvent:
mi.sendToFTS(msg, sessionID)
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
case *IntegrationEvent:
return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{

View file

@ -1,11 +1,15 @@
package datasaver
import "openreplay/backend/pkg/db/cache"
import (
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/queue/types"
)
type Saver struct {
pg *cache.PGCache
pg *cache.PGCache
producer types.Producer
}
func New(pg *cache.PGCache) *Saver {
return &Saver{pg: pg}
func New(pg *cache.PGCache, producer types.Producer) *Saver {
return &Saver{pg: pg, producer: producer}
}

View file

@ -9,6 +9,7 @@ import (
"math/rand"
"net/http"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/flakeid"
"strconv"
"time"
@ -133,7 +134,9 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
Token: e.services.Tokenizer.Compose(*tokenData),
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
ProjectID: strconv.FormatUint(uint64(p.ProjectID), 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
StartTimestamp: int64(flakeid.ExtractTimestamp(tokenData.ID)),
})
}

View file

@ -16,10 +16,12 @@ type StartSessionRequest struct {
type StartSessionResponse struct {
Timestamp int64 `json:"timestamp"`
StartTimestamp int64 `json:"startTimestamp"`
Delay int64 `json:"delay"`
Token string `json:"token"`
UserUUID string `json:"userUUID"`
SessionID string `json:"sessionID"`
ProjectID string `json:"projectID"`
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
}

View file

@ -57,6 +57,21 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
}
newMsg.SetMeta(msg.Meta())
return newMsg
case *messages.AdoptedSSReplaceURLBased:
newMsg := &messages.AdoptedSSReplace{
SheetID: m.SheetID,
Text: e.handleCSS(sessID, m.BaseURL, m.Text),
}
newMsg.SetMeta(msg.Meta())
return newMsg
case *messages.AdoptedSSInsertRuleURLBased:
newMsg := &messages.AdoptedSSInsertRule{
SheetID: m.SheetID,
Index: m.Index,
Rule: e.handleCSS(sessID, m.BaseURL, m.Rule),
}
newMsg.SetMeta(msg.Meta())
return newMsg
}
return msg
}

View file

@ -71,6 +71,7 @@ func (w *Writer) Write(key uint64, data []byte) error {
if err != nil {
return err
}
// TODO: add a check for the number of bytes written to the file
_, err = file.Write(data)
return err
}

View file

@ -83,6 +83,14 @@ func (c *PGCache) InsertWebErrorEvent(sessionID uint64, e *ErrorEvent) error {
return nil
}
func (c *PGCache) InsertSessionReferrer(sessionID uint64, referrer string) error {
_, err := c.GetSession(sessionID)
if err != nil {
return err
}
return c.Conn.InsertSessionReferrer(sessionID, referrer)
}
func (c *PGCache) InsertWebFetchEvent(sessionID uint64, e *FetchEvent) error {
session, err := c.GetSession(sessionID)
if err != nil {

View file

@ -3,7 +3,6 @@ package cache
import (
"errors"
"github.com/jackc/pgx/v4"
. "openreplay/backend/pkg/db/types"
)

View file

@ -5,6 +5,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"openreplay/backend/pkg/db/types"
"openreplay/backend/pkg/monitoring"
"strings"
"time"
@ -13,6 +14,10 @@ import (
"github.com/jackc/pgx/v4/pgxpool"
)
type CH interface {
InsertAutocomplete(session *types.Session, msgType, msgValue string) error
}
type batchItem struct {
query string
arguments []interface{}
@ -37,6 +42,11 @@ type Conn struct {
batchSizeLines syncfloat64.Histogram
sqlRequestTime syncfloat64.Histogram
sqlRequestCounter syncfloat64.Counter
chConn CH
}
func (conn *Conn) SetClickHouse(ch CH) {
conn.chConn = ch
}
func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn {
@ -152,6 +162,13 @@ func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp
if err := conn.autocompletes.Append(value, tp, projectID); err != nil {
log.Printf("autocomplete bulk err: %s", err)
}
if conn.chConn == nil {
return
}
// Send autocomplete data to clickhouse
if err := conn.chConn.InsertAutocomplete(&types.Session{SessionID: sessionID, ProjectID: projectID}, tp, value); err != nil {
log.Printf("click house autocomplete err: %s", err)
}
}
func (conn *Conn) batchQueue(sessionID uint64, sql string, args ...interface{}) {

View file

@ -83,7 +83,6 @@ func (conn *Conn) InsertSessionEnd(sessionID uint64, timestamp uint64) (uint64,
}
func (conn *Conn) HandleSessionEnd(sessionID uint64) error {
// TODO: search acceleration?
sqlRequest := `
UPDATE sessions
SET issue_types=(SELECT
@ -96,11 +95,7 @@ func (conn *Conn) HandleSessionEnd(sessionID uint64) error {
INNER JOIN issues AS ps USING (issue_id)
WHERE session_id = $1)
WHERE session_id = $1`
conn.batchQueue(sessionID, sqlRequest, sessionID)
// Record approximate message size
conn.updateBatchSize(sessionID, len(sqlRequest)+8)
return nil
return conn.c.Exec(sqlRequest, sessionID)
}
func (conn *Conn) InsertRequest(sessionID uint64, timestamp uint64, index uint64, url string, duration uint64, success bool) error {

View file

@ -40,7 +40,6 @@ func (conn *Conn) InsertWebUserAnonymousID(sessionID uint64, projectID uint32, u
return err
}
// TODO: fix column "dom_content_loaded_event_end" of relation "pages"
func (conn *Conn) InsertWebPageEvent(sessionID uint64, projectID uint32, e *PageEvent) error {
host, path, query, err := url.GetURLParts(e.URL)
if err != nil {
@ -79,6 +78,9 @@ func (conn *Conn) InsertWebClickEvent(sessionID uint64, projectID uint32, e *Cli
}
func (conn *Conn) InsertWebInputEvent(sessionID uint64, projectID uint32, e *InputEvent) error {
if e.Label == "" {
return nil
}
value := &e.Value
if e.ValueMasked {
value = nil
@ -185,3 +187,15 @@ func (conn *Conn) InsertWebGraphQLEvent(sessionID uint64, projectID uint32, save
conn.insertAutocompleteValue(sessionID, projectID, "GRAPHQL", e.OperationName)
return nil
}
func (conn *Conn) InsertSessionReferrer(sessionID uint64, referrer string) error {
log.Printf("insert referrer, sessID: %d, referrer: %s", sessionID, referrer)
if referrer == "" {
return nil
}
return conn.c.Exec(`
UPDATE sessions
SET referrer = $1, base_referrer = $2
WHERE session_id = $3 AND referrer IS NULL`,
referrer, url.DiscardURLQuery(referrer), sessionID)
}

View file

@ -1,7 +1,7 @@
package postgres
// Mechanism for combining several session updates into one
const sessionUpdateReq = `UPDATE sessions SET (pages_count, events_count) = (pages_count + $1, events_count + $2) WHERE session_id = $3`
const sessionUpdateReq = `UPDATE sessions SET pages_count = pages_count + $1, events_count = events_count + $2 WHERE session_id = $3`
type sessionUpdates struct {
sessionID uint64

View file

@ -1,17 +1,24 @@
package postgres
import . "openreplay/backend/pkg/db/types"
import (
"github.com/jackc/pgtype"
"log"
. "openreplay/backend/pkg/db/types"
)
func (conn *Conn) GetSession(sessionID uint64) (*Session, error) {
s := &Session{SessionID: sessionID}
var revID, userOSVersion *string
var issueTypes pgtype.EnumArray
if err := conn.c.QueryRow(`
SELECT platform,
duration, project_id, start_ts,
user_uuid, user_os, user_os_version,
user_device, user_device_type, user_country,
rev_id, tracker_version,
user_id, user_anonymous_id,
user_id, user_anonymous_id, referrer,
pages_count, events_count, errors_count, issue_types,
user_browser, user_browser_version, issue_score,
metadata_1, metadata_2, metadata_3, metadata_4, metadata_5,
metadata_6, metadata_7, metadata_8, metadata_9, metadata_10
FROM sessions
@ -23,7 +30,9 @@ func (conn *Conn) GetSession(sessionID uint64) (*Session, error) {
&s.UserUUID, &s.UserOS, &userOSVersion,
&s.UserDevice, &s.UserDeviceType, &s.UserCountry,
&revID, &s.TrackerVersion,
&s.UserID, &s.UserAnonymousID,
&s.UserID, &s.UserAnonymousID, &s.Referrer,
&s.PagesCount, &s.EventsCount, &s.ErrorsCount, &issueTypes,
&s.UserBrowser, &s.UserBrowserVersion, &s.IssueScore,
&s.Metadata1, &s.Metadata2, &s.Metadata3, &s.Metadata4, &s.Metadata5,
&s.Metadata6, &s.Metadata7, &s.Metadata8, &s.Metadata9, &s.Metadata10); err != nil {
return nil, err
@ -34,5 +43,8 @@ func (conn *Conn) GetSession(sessionID uint64) (*Session, error) {
if revID != nil {
s.RevID = *revID
}
if err := issueTypes.AssignTo(&s.IssueTypes); err != nil {
log.Printf("can't scan IssueTypes, err: %s", err)
}
return s, nil
}
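
issue_types comes back from Postgres as an enum array, which pgx will not scan into a []string directly; pgtype.EnumArray bridges the two. A minimal sketch of that conversion path in isolation, assuming pgtype's usual Set/AssignTo contract (the example values are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/jackc/pgtype"
)

func main() {
	var issueTypes pgtype.EnumArray
	// In GetSession this value is filled in by QueryRow's scan.
	if err := issueTypes.Set([]string{"click_rage", "dead_click"}); err != nil {
		log.Fatal(err)
	}
	var out []string
	if err := issueTypes.AssignTo(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // [click_rage dead_click]
}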

View file

@ -11,11 +11,14 @@ type Session struct {
UserOSVersion string
UserDevice string
UserCountry string
Referrer *string
Duration *uint64
PagesCount int
EventsCount int
ErrorsCount int
IssueTypes []string
IssueScore int
UserID *string // pointer??
UserAnonymousID *string

View file

@ -1,67 +1,161 @@
package messages
import (
"fmt"
"github.com/pkg/errors"
"bytes"
"io"
"log"
"strings"
)
func ReadBatchReader(reader io.Reader, messageHandler func(Message)) error {
var index uint64
var timestamp int64
type Iterator interface {
Next() bool // Returns true if there is a next message in the batch
Type() int // Returns the type of the current message
Message() Message // Returns the current message, raw or decoded
}
for {
msg, err := ReadMessage(reader)
type iteratorImpl struct {
data *bytes.Reader
index uint64
timestamp int64
version uint64
msgType uint64
msgSize uint64
canSkip bool
msg Message
url string
}
func NewIterator(data []byte) Iterator {
return &iteratorImpl{
data: bytes.NewReader(data),
}
}
func (i *iteratorImpl) Next() bool {
if i.canSkip {
if _, err := i.data.Seek(int64(i.msgSize), io.SeekCurrent); err != nil {
log.Printf("seek err: %s", err)
return false
}
}
i.canSkip = false
var err error
i.msgType, err = ReadUint(i.data)
if err != nil {
if err == io.EOF {
return nil
return false
}
log.Printf("can't read message type: %s", err)
return false
}
if i.version > 0 && messageHasSize(i.msgType) {
// Read message size if it is a new protocol version
i.msgSize, err = ReadSize(i.data)
if err != nil {
log.Printf("can't read message size: %s", err)
return false
}
i.msg = &RawMessage{
tp: i.msgType,
size: i.msgSize,
meta: &message{},
reader: i.data,
skipped: &i.canSkip,
}
i.canSkip = true
} else {
i.msg, err = ReadMessage(i.msgType, i.data)
if err == io.EOF {
return false
} else if err != nil {
if strings.HasPrefix(err.Error(), "Unknown message code:") {
code := strings.TrimPrefix(err.Error(), "Unknown message code: ")
msg, err = DecodeExtraMessage(code, reader)
i.msg, err = DecodeExtraMessage(code, i.data)
if err != nil {
return fmt.Errorf("can't decode msg: %s", err)
log.Printf("can't decode msg: %s", err)
return false
}
} else {
return errors.Wrapf(err, "Batch Message decoding error on message with index %v", index)
log.Printf("Batch Message decoding error on message with index %v, err: %s", i.index, err)
return false
}
}
msg = transformDeprecated(msg)
isBatchMeta := false
switch m := msg.(type) {
case *BatchMeta: // Is not required to be present in batch since IOS doesn't have it (though we might change it)
if index != 0 { // Might be several 0-0 BatchMeta in a row without a error though
return errors.New("Batch Meta found at the end of the batch")
}
index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
timestamp = m.Timestamp
isBatchMeta = true
// continue readLoop
case *IOSBatchMeta:
if index != 0 { // Might be several 0-0 BatchMeta in a row without a error though
return errors.New("Batch Meta found at the end of the batch")
}
index = m.FirstIndex
timestamp = int64(m.Timestamp)
isBatchMeta = true
// continue readLoop
case *Timestamp:
timestamp = int64(m.Timestamp) // TODO(?): replace timestamp type to int64 everywhere (including encoding part in tracker)
// No skipping here to make it easy to encode back the same sequence of messages
// continue readLoop
case *SessionStart:
timestamp = int64(m.Timestamp)
case *SessionEnd:
timestamp = int64(m.Timestamp)
}
msg.Meta().Index = index
msg.Meta().Timestamp = timestamp
messageHandler(msg)
if !isBatchMeta { // Without that indexes will be unique anyway, though shifted by 1 because BatchMeta is not counted in tracker
index++
}
i.msg = transformDeprecated(i.msg)
}
return errors.New("Error of the codeflow. (Should return on EOF)")
// Process meta information
isBatchMeta := false
switch i.msgType {
case MsgBatchMetadata:
if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
log.Printf("Batch Meta found at the end of the batch")
return false
}
m := i.msg.Decode().(*BatchMetadata)
i.index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
i.timestamp = m.Timestamp
i.version = m.Version
i.url = m.Url
isBatchMeta = true
if i.version > 1 {
log.Printf("incorrect batch version, skip current batch")
return false
}
case MsgBatchMeta: // Is not required to be present in batch since IOS doesn't have it (though we might change it)
if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
log.Printf("Batch Meta found at the end of the batch")
return false
}
m := i.msg.Decode().(*BatchMeta)
i.index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
i.timestamp = m.Timestamp
isBatchMeta = true
// continue readLoop
case MsgIOSBatchMeta:
if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
log.Printf("Batch Meta found at the end of the batch")
return false
}
m := i.msg.Decode().(*IOSBatchMeta)
i.index = m.FirstIndex
i.timestamp = int64(m.Timestamp)
isBatchMeta = true
// continue readLoop
case MsgTimestamp:
m := i.msg.Decode().(*Timestamp)
i.timestamp = int64(m.Timestamp)
// No skipping here to make it easy to encode back the same sequence of messages
// continue readLoop
case MsgSessionStart:
m := i.msg.Decode().(*SessionStart)
i.timestamp = int64(m.Timestamp)
case MsgSessionEnd:
m := i.msg.Decode().(*SessionEnd)
i.timestamp = int64(m.Timestamp)
case MsgSetPageLocation:
m := i.msg.Decode().(*SetPageLocation)
i.url = m.URL
}
i.msg.Meta().Index = i.index
i.msg.Meta().Timestamp = i.timestamp
i.msg.Meta().Url = i.url
if !isBatchMeta { // Without that indexes will be unique anyway, though shifted by 1 because BatchMeta is not counted in tracker
i.index++
}
return true
}
func (i *iteratorImpl) Type() int {
return int(i.msgType)
}
func (i *iteratorImpl) Message() Message {
return i.msg
}
func messageHasSize(msgType uint64) bool {
return !(msgType == 80 || msgType == 81 || msgType == 82)
}

View file

@ -1,6 +1,7 @@
package messages
import (
"encoding/binary"
"fmt"
"io"
)
@ -20,6 +21,21 @@ func (msg *SessionSearch) Encode() []byte {
return buf[:p]
}
func (msg *SessionSearch) EncodeWithIndex() []byte {
encoded := msg.Encode()
if IsIOSType(msg.TypeID()) {
return encoded
}
data := make([]byte, len(encoded)+8)
copy(data[8:], encoded[:])
binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
return data
}
func (msg *SessionSearch) Decode() Message {
return msg
}
func (msg *SessionSearch) TypeID() int {
return 127
}

View file

@ -2,7 +2,7 @@
package messages
func IsReplayerType(id int) bool {
return 0 == id || 2 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 22 == id || 37 == id || 38 == id || 39 == id || 40 == id || 41 == id || 44 == id || 45 == id || 46 == id || 47 == id || 48 == id || 49 == id || 54 == id || 55 == id || 59 == id || 69 == id || 70 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 22 == id || 37 == id || 38 == id || 39 == id || 40 == id || 41 == id || 44 == id || 45 == id || 46 == id || 47 == id || 48 == id || 49 == id || 54 == id || 55 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
}
func IsIOSType(id int) bool {

View file

@ -3,6 +3,7 @@ package messages
type message struct {
Timestamp int64
Index uint64
Url string
}
func (m *message) Meta() *message {
@ -12,10 +13,13 @@ func (m *message) Meta() *message {
func (m *message) SetMeta(origin *message) {
m.Timestamp = origin.Timestamp
m.Index = origin.Index
m.Url = origin.Url
}
type Message interface {
Encode() []byte
EncodeWithIndex() []byte
Decode() Message
TypeID() int
Meta() *message
}

File diff suppressed because it is too large

View file

@ -3,6 +3,7 @@ package messages
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
)
@ -16,15 +17,6 @@ func ReadByte(reader io.Reader) (byte, error) {
return p[0], nil
}
// func SkipBytes(reader io.ReadSeeker) error {
// n, err := ReadUint(reader)
// if err != nil {
// return err
// }
// _, err = reader.Seek(n, io.SeekCurrent);
// return err
// }
func ReadData(reader io.Reader) ([]byte, error) {
n, err := ReadUint(reader)
if err != nil {
@ -153,3 +145,28 @@ func WriteJson(v interface{}, buf []byte, p int) int {
}
return WriteData(data, buf, p)
}
func WriteSize(size uint64, buf []byte, p int) {
var m uint64 = 255
for i := 0; i < 3; i++ {
buf[p+i] = byte(size & m)
size = size >> 8
}
}
func ReadSize(reader io.Reader) (uint64, error) {
buf := make([]byte, 3)
n, err := io.ReadFull(reader, buf)
if err != nil {
return 0, err
}
if n != 3 {
return 0, fmt.Errorf("read only %d of 3 size bytes", n)
}
var size uint64
for i, b := range buf {
size += uint64(b) << (8 * i)
}
return size, nil
}
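
WriteSize and ReadSize frame a message body with a 3-byte little-endian length, which caps a single message at 2^24-1 bytes (roughly 16 MB). A self-contained round trip of the same encoding (offset handling simplified to 0):

package main

import (
	"bytes"
	"fmt"
	"io"
)

func writeSize(size uint64, buf []byte) {
	for i := 0; i < 3; i++ {
		buf[i] = byte(size & 0xff)
		size >>= 8
	}
}

func readSize(r io.Reader) (uint64, error) {
	buf := make([]byte, 3)
	if _, err := io.ReadFull(r, buf); err != nil {
		return 0, err
	}
	var size uint64
	for i, b := range buf {
		size += uint64(b) << (8 * i)
	}
	return size, nil
}

func main() {
	buf := make([]byte, 3)
	writeSize(300, buf) // 300 = 0x012C -> [0x2C 0x01 0x00]
	size, _ := readSize(bytes.NewReader(buf))
	fmt.Println(size) // 300
}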

View file

@ -0,0 +1,68 @@
package messages
import (
"bytes"
"encoding/binary"
"io"
"log"
)
// RawMessage is a message that has not been decoded yet
type RawMessage struct {
tp uint64
size uint64
data []byte
reader *bytes.Reader
meta *message
encoded bool
skipped *bool
}
func (m *RawMessage) Encode() []byte {
if m.encoded {
return m.data
}
m.data = make([]byte, m.size+1)
m.data[0] = uint8(m.tp)
m.encoded = true
*m.skipped = false
_, err := io.ReadFull(m.reader, m.data[1:])
if err != nil {
log.Printf("message encode err: %s", err)
return nil
}
return m.data
}
func (m *RawMessage) EncodeWithIndex() []byte {
if !m.encoded {
m.Encode()
}
if IsIOSType(int(m.tp)) {
return m.data
}
data := make([]byte, len(m.data)+8)
copy(data[8:], m.data[:])
binary.LittleEndian.PutUint64(data[0:], m.Meta().Index)
return data
}
func (m *RawMessage) Decode() Message {
if !m.encoded {
m.Encode()
}
msg, err := ReadMessage(m.tp, bytes.NewReader(m.data[1:]))
if err != nil {
log.Printf("decode err: %s", err)
}
msg.Meta().SetMeta(m.meta)
return msg
}
func (m *RawMessage) TypeID() int {
return int(m.tp)
}
func (m *RawMessage) Meta() *message {
return m.meta
}
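
EncodeWithIndex prepends the message index as 8 little-endian bytes ahead of the encoded body (iOS messages excepted). A short sketch of reading such a frame back, which is what any consumer of the session files has to do (the payload bytes are placeholders):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Illustrative frame: index 7 followed by an encoded message
	// (type byte plus payload), as laid out by EncodeWithIndex.
	payload := []byte{0x50, 0x01, 0x02} // placeholder encoded message
	frame := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(frame[:8], 7)
	copy(frame[8:], payload)

	index := binary.LittleEndian.Uint64(frame[:8])
	body := frame[8:]
	fmt.Println(index, body) // 7 [80 1 2]
}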

File diff suppressed because it is too large

View file

@ -1,19 +1,12 @@
package queue
import (
"bytes"
"log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue/types"
)
func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler, autoCommit bool, messageSizeLimit int) types.Consumer {
func NewMessageConsumer(group string, topics []string, handler types.RawMessageHandler, autoCommit bool, messageSizeLimit int) types.Consumer {
return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) {
if err := messages.ReadBatchReader(bytes.NewReader(value), func(msg messages.Message) {
handler(sessionID, msg, meta)
}); err != nil {
log.Printf("Decode error: %v\n", err)
}
handler(sessionID, messages.NewIterator(value), meta)
}, autoCommit, messageSizeLimit)
}
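
End to end, the queue layer now hands each batch to the handler as a lazy iterator instead of pre-decoding every message. A wiring sketch against the signatures in this file (group, topic, and size limit are placeholders):

package main

import (
	"log"

	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/queue"
	"openreplay/backend/pkg/queue/types"
)

func main() {
	consumer := queue.NewMessageConsumer(
		"example-group",     // placeholder group
		[]string{"raw-web"}, // placeholder topic
		func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
			for iter.Next() {
				log.Printf("session %d: message type %d", sessionID, iter.Type())
			}
		},
		false,   // autoCommit
		1048576, // messageSizeLimit
	)
	_ = consumer
}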

View file

@ -26,3 +26,4 @@ type Meta struct {
type MessageHandler func(uint64, []byte, *Meta)
type DecodedMessageHandler func(uint64, messages.Message, *Meta)
type RawMessageHandler func(uint64, messages.Iterator, *Meta)

View file

@ -6,6 +6,7 @@ import (
"os"
"sort"
"strconv"
"time"
_s3 "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
@ -71,6 +72,17 @@ func (s3 *S3) Exists(key string) bool {
return false
}
func (s3 *S3) GetCreationTime(key string) *time.Time {
ans, err := s3.svc.HeadObject(&_s3.HeadObjectInput{
Bucket: s3.bucket,
Key: &key,
})
if err != nil {
return nil
}
return ans.LastModified
}
const MAX_RETURNING_COUNT = 40
func (s3 *S3) GetFrequentlyUsedKeys(projectID uint64) ([]string, error) {

View file

@ -14,7 +14,7 @@ func getSessionKey(sessionID uint64) string {
return strconv.FormatUint(
uint64(time.UnixMilli(
int64(flakeid.ExtractTimestamp(sessionID)),
).Weekday()),
).Day()),
10,
)
}
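
The failover key switches from day-of-week (0-6, wrapping every week) to day-of-month (1-31), so keys only repeat after roughly a month. A sketch of the computation, assuming flakeid packs a millisecond timestamp into the session ID as the surrounding code implies:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	ts := int64(1662033600000) // placeholder session-start millis
	// .Day() yields 1..31; the previous .Weekday() yielded 0..6.
	key := strconv.FormatUint(uint64(time.UnixMilli(ts).Day()), 10)
	fmt.Println(key) // e.g. "1" for 2022-09-01 UTC
}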

9
ee/api/.gitignore vendored
View file

@ -177,11 +177,16 @@ chalicelib/saas
README/*
Pipfile
.local/*
/chalicelib/core/alerts.py
/chalicelib/core/alerts_processor.py
/chalicelib/core/announcements.py
/chalicelib/core/autocomplete.py
/chalicelib/core/collaboration_slack.py
/chalicelib/core/errors_favorite_viewed.py
/chalicelib/core/countries.py
/chalicelib/core/errors.py
/chalicelib/core/errors_favorite.py
/chalicelib/core/events.py
/chalicelib/core/events_ios.py
/chalicelib/core/funnels.py
@ -257,4 +262,4 @@ Pipfile
/build_alerts.sh
/routers/subs/metrics.py
/routers/subs/v1_api.py
/chalicelib/core/dashboards.py
/chalicelib/core/dashboards.py

View file

@ -1,7 +1,6 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache build-base libressl libffi-dev libressl-dev libxslt-dev libxml2-dev xmlsec-dev xmlsec nodejs npm tini
ARG envarg
ENV SOURCE_MAP_VERSION=0.7.4 \

View file

@ -1,7 +1,6 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=alerts \

View file

@ -1,7 +1,6 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk upgrade busybox --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=crons \

View file

@ -5,18 +5,20 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from starlette import status
from starlette.responses import StreamingResponse, JSONResponse
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from routers import core, core_dynamic, ee, saml
from routers.subs import v1_api
from routers.crons import core_crons
from routers.crons import core_dynamic_crons
from routers.subs import dashboard, insights, metrics, v1_api_ee
from routers.subs import v1_api
app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
app.add_middleware(GZipMiddleware, minimum_size=1000)
@app.middleware('http')

View file

@ -15,13 +15,15 @@ class ProjectAuthorizer:
if len(request.path_params.keys()) == 0 or request.path_params.get(self.project_identifier) is None:
return
current_user: schemas.CurrentContext = await OR_context(request)
project_identifier = request.path_params[self.project_identifier]
value = request.path_params[self.project_identifier]
user_id = current_user.user_id if request.state.authorizer_identity == "jwt" else None
if (self.project_identifier == "projectId" \
and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id,
and not projects.is_authorized(project_id=value, tenant_id=current_user.tenant_id,
user_id=user_id)) \
or (self.project_identifier.lower() == "projectKey" \
and not projects.is_authorized(project_id=projects.get_internal_project_id(project_identifier),
tenant_id=current_user.tenant_id, user_id=user_id)):
or (self.project_identifier == "projectKey" \
and not projects.is_authorized(
project_id=projects.get_internal_project_id(value),
tenant_id=current_user.tenant_id, user_id=user_id)):
print("unauthorized project")
print(value)
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="unauthorized project.")

View file

@ -18,6 +18,8 @@ check_prereq() {
}
function build_api(){
cp -R ../api ../_crons
cd ../_crons
tag=""
# Copy enterprise code
@ -25,15 +27,15 @@ function build_api(){
envarg="default-ee"
tag="ee-"
cp -R ../api ../_crons
docker build -f ../_crons/Dockerfile.crons --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
rm -rf ../crons
docker build -f ./Dockerfile.crons --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/crons:${git_sha1} .
cd ../api
rm -rf ../_crons
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/crons:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/crons:${git_sha1} ${DOCKER_REPO:-'local'}/crons:${tag}latest
docker push ${DOCKER_REPO:-'local'}/crons:${tag}latest
}
echo "completed crons build"
echo "completed crons build"
}
check_prereq

View file

@ -0,0 +1,38 @@
from decouple import config
import logging
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
print(">>> Using experimental sessions search")
from . import sessions as sessions_legacy
from . import sessions_exp as sessions
else:
from . import sessions as sessions
if config("EXP_AUTOCOMPLETE", cast=bool, default=False):
print(">>> Using experimental autocomplete")
from . import autocomplete_exp as autocomplete
else:
from . import autocomplete as autocomplete
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
print(">>> Using experimental error search")
from . import errors_exp as errors
else:
from . import errors as errors
if config("EXP_METRICS", cast=bool, default=False):
print(">>> Using experimental metrics")
from . import metrics_exp as metrics
else:
from . import metrics as metrics
if config("EXP_ALERTS", cast=bool, default=False):
print(">>> Using experimental alerts")
from . import alerts_processor_exp as alerts_processor
else:
from . import alerts_processor as alerts_processor
from . import significance_exp as significance

View file

@ -12,7 +12,8 @@ def get_all_alerts():
(EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
alerts.name,
alerts.series_id,
filter
filter,
change
FROM public.alerts
LEFT JOIN metric_series USING (series_id)
INNER JOIN projects USING (project_id)

View file

@ -0,0 +1,224 @@
import logging
from decouple import config
import schemas
from chalicelib.core import alerts_listener, alerts_processor
from chalicelib.core import sessions, alerts
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
from chalicelib.utils.TimeUTC import TimeUTC
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
LeftToDb = {
schemas.AlertColumn.performance__dom_content_loaded__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "COALESCE(AVG(NULLIF(dom_content_loaded_event_time ,0)),0)",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__first_meaningful_paint__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__page_load_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(load_event_time ,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__dom_build_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(dom_building_time,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__speed_index__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(speed_index,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__page_response_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(response_time,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__ttfb__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(first_contentful_paint_time,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__time_to_render__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS pages",
"formula": "AVG(NULLIF(visually_complete,0))",
"eventType": "LOCATION"
},
schemas.AlertColumn.performance__image_load_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
"formula": "AVG(NULLIF(resources.duration,0))",
"condition": "type='img'"
},
schemas.AlertColumn.performance__request_load_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
"formula": "AVG(NULLIF(resources.duration,0))",
"condition": "type='fetch'"
},
schemas.AlertColumn.resources__load_time__average: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
"formula": "AVG(NULLIF(resources.duration,0))"
},
schemas.AlertColumn.resources__missing__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
"formula": "COUNT(DISTINCT url_hostpath)",
"condition": "success= FALSE AND type='img'"
},
schemas.AlertColumn.errors__4xx_5xx__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
"eventType": "REQUEST",
"formula": "COUNT(1)",
"condition": "intDiv(requests.status, 100)!=2"
},
schemas.AlertColumn.errors__4xx__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
"eventType": "REQUEST",
"formula": "COUNT(1)",
"condition": "intDiv(requests.status, 100)==4"
},
schemas.AlertColumn.errors__5xx__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
"eventType": "REQUEST",
"formula": "COUNT(1)",
"condition": "intDiv(requests.status, 100)==5"
},
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
"eventType": "ERROR",
"formula": "COUNT(DISTINCT session_id)",
"condition": "source='js_exception'"
},
schemas.AlertColumn.performance__crashes__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_sessions_table(timestamp)} AS sessions",
"formula": "COUNT(DISTINCT session_id)",
"condition": "duration>0 AND errors_count>0"
},
schemas.AlertColumn.errors__javascript__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
"eventType": "ERROR",
"formula": "COUNT(DISTINCT session_id)",
"condition": "source='js_exception'"
},
schemas.AlertColumn.errors__backend__count: {
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
"eventType": "ERROR",
"formula": "COUNT(DISTINCT session_id)",
"condition": "source!='js_exception'"
},
}
def Build(a):
now = TimeUTC.now()
params = {"project_id": a["projectId"], "now": now}
full_args = {}
if a["seriesId"] is not None:
a["filter"]["sort"] = "session_id"
a["filter"]["order"] = schemas.SortOrderType.desc
a["filter"]["startDate"] = -1
a["filter"]["endDate"] = TimeUTC.now()
full_args, query_part = sessions.search_query_parts_ch(
data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), error_status=None, errors_only=False,
issue=None, project_id=a["projectId"], user_id=None, favorite_only=False)
subQ = f"""SELECT COUNT(session_id) AS value
{query_part}"""
else:
colDef = LeftToDb[a["query"]["left"]]
params["event_type"] = LeftToDb[a["query"]["left"]].get("eventType")
subQ = f"""SELECT {colDef["formula"]} AS value
FROM {colDef["table"](now)}
WHERE project_id = %(project_id)s
{"AND event_type=%(event_type)s" if params["event_type"] else ""}
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
if a["seriesId"] is not None:
q += f""" FROM ({subQ}) AS stat"""
else:
q += f""" FROM ({subQ}
AND datetime>=toDateTime(%(startDate)s/1000)
AND datetime<=toDateTime(%(now)s/1000) ) AS stat"""
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
else:
if a["change"] == schemas.AlertDetectionType.change:
if a["seriesId"] is not None:
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
q += f" FROM ( {sub1} ) AS stat"
params = {**params, **full_args,
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
else:
sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
AND datetime<=toDateTime(%(now)s/1000)"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
q += f" FROM ( {sub1} ) AS stat"
else:
if a["seriesId"] is not None:
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
q += f" FROM ({sub1}) AS stat"
params = {**params, **full_args,
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
"timestamp_sub2": TimeUTC.now() \
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
* 60 * 1000}
else:
sub1 = f"""{subQ} AND datetime>=toDateTime(%(startDate)s/1000)
AND datetime<=toDateTime(%(now)s/1000)"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND datetime<toDateTime(%(startDate)s/1000)
AND datetime>=toDateTime(%(timestamp_sub2)s/1000)"""
params["timestamp_sub2"] = TimeUTC.now() \
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
q += f" FROM ({sub1}) AS stat"
return q, params
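# For a threshold alert on a predefined column, Build() yields a query of
# roughly this shape (a sketch only; the table name comes from exp_ch_helper
# and the operator/right-hand value from the alert definition):
#
#   SELECT coalesce(value, 0) AS value,
#          coalesce(value, 0) > 50 AS valid
#   FROM (SELECT COUNT(1) AS value
#         FROM <main_events_table> AS requests
#         WHERE project_id = %(project_id)s
#           AND event_type = %(event_type)s
#           AND intDiv(requests.status, 100) == 4
#           AND datetime >= toDateTime(%(startDate)s/1000)
#           AND datetime <= toDateTime(%(now)s/1000)) AS stat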
def process():
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
for alert in all_alerts:
if alert["query"]["left"] != "CUSTOM":
continue
if alerts_processor.can_check(alert):
logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
query, params = Build(alert)
query = ch_cur.format(query, params)
logging.debug(alert)
logging.debug(query)
try:
result = ch_cur.execute(query)
if len(result) > 0:
result = result[0]
if result["valid"]:
logging.info("Valid alert, notifying users")
notifications.append(alerts_processor.generate_notification(alert, result))
except Exception as e:
logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
logging.error(str(e))
logging.error(query)
if len(notifications) > 0:
cur.execute(
cur.mogrify(f"""UPDATE public.alerts
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:
alerts.process_notifications(notifications)
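# Minimal wiring sketch (assumption: the interval and job id below are
# illustrative, not the project's actual cron configuration):
#
#   from apscheduler.schedulers.asyncio import AsyncIOScheduler
#
#   scheduler = AsyncIOScheduler()
#   scheduler.add_job(process, "interval", minutes=3, id="ch_alerts")
#   scheduler.start()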

View file

@@ -0,0 +1,107 @@
import schemas
from chalicelib.utils import ch_client
from chalicelib.utils import helper
from chalicelib.utils.event_filter_definition import Event
TABLE = "final.autocomplete"
def __get_autocomplete_table(value, project_id):
autocomplete_events = [schemas.FilterType.rev_id,
schemas.EventType.click,
schemas.FilterType.user_device,
schemas.FilterType.user_id,
schemas.FilterType.user_browser,
schemas.FilterType.user_os,
schemas.EventType.custom,
schemas.FilterType.user_country,
schemas.EventType.location,
schemas.EventType.input]
autocomplete_events.sort()
sub_queries = []
for e in autocomplete_events:
sub_queries.append(f"""(SELECT type, value
FROM {TABLE}
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 5)""")
if len(value) > 2:
sub_queries.append(f"""(SELECT type, value
FROM {TABLE}
WHERE project_id = %(project_id)s
AND type= '{e}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5)""")
with ch_client.ClickHouseClient() as cur:
query = " UNION DISTINCT ".join(sub_queries) + ";"
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}
results = []
try:
results = cur.execute(query=query, params=params)
except Exception as err:
print("--------- CH AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(cur.format(query=query, params=params))
print("--------- PARAMS -----------")
print(params)
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
return results
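# Shape of the generated lookup, assuming helper.string_to_sql_like returns a
# %-wrapped ILIKE pattern (and a prefix pattern for the "^"-prefixed variant):
# per event type, a LIMIT-5 prefix match is unioned with, for inputs longer
# than 2 characters, a LIMIT-5 match anywhere in the value:
#
#   (SELECT type, value FROM final.autocomplete
#    WHERE project_id = %(project_id)s AND type = '<event type>'
#      AND value ILIKE %(svalue)s ORDER BY value LIMIT 5)
#   UNION DISTINCT
#   (SELECT type, value FROM final.autocomplete
#    WHERE project_id = %(project_id)s AND type = '<event type>'
#      AND value ILIKE %(value)s ORDER BY value LIMIT 5)
#   UNION DISTINCT ...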
def __generic_query(typename, value_length=None):
if value_length is None or value_length > 2:
return f"""(SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 5)
UNION DISTINCT
(SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5);"""
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
AND type='{typename}'
AND value ILIKE %(svalue)s
ORDER BY value
LIMIT 10;"""
def __generic_autocomplete(event: Event):
def f(project_id, value, key=None, source=None):
with ch_client.ClickHouseClient() as cur:
query = __generic_query(event.ui_type, value_length=len(value))
params = {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}
results = cur.execute(query=query, params=params)
return helper.list_to_camel_case(results)
return f
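# Usage sketch (the Event constructor arguments are assumptions for
# illustration; the real definition lives in
# chalicelib.utils.event_filter_definition):
#
#   click_autocomplete = __generic_autocomplete(Event(ui_type=schemas.EventType.click, ...))
#   suggestions = click_autocomplete(project_id=1, value="checkout")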
def __generic_autocomplete_metas(typename):
def f(project_id, text):
with ch_client.ClickHouseClient() as cur:
query = __generic_query(typename, value_length=len(text))
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
"svalue": helper.string_to_sql_like("^" + text)}
results = cur.execute(query=query, params=params)
return results
return f

View file

@@ -1,13 +1,59 @@
import json
import schemas
from chalicelib.core import metrics
from chalicelib.core import metrics, metadata
from chalicelib.core import sourcemaps, sessions
from chalicelib.utils import ch_client, metrics_helper
from chalicelib.utils import ch_client, metrics_helper, exp_ch_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
def _multiple_values(values, value_key="value"):
query_values = {}
if values is not None and isinstance(values, list):
for i in range(len(values)):
k = f"{value_key}_{i}"
query_values[k] = values[i]
return query_values
def __get_sql_operator(op: schemas.SearchEventOperator):
return {
schemas.SearchEventOperator._is: "=",
schemas.SearchEventOperator._is_any: "IN",
schemas.SearchEventOperator._on: "=",
schemas.SearchEventOperator._on_any: "IN",
schemas.SearchEventOperator._is_not: "!=",
schemas.SearchEventOperator._not_on: "!=",
schemas.SearchEventOperator._contains: "ILIKE",
schemas.SearchEventOperator._not_contains: "NOT ILIKE",
schemas.SearchEventOperator._starts_with: "ILIKE",
schemas.SearchEventOperator._ends_with: "ILIKE",
}.get(op, "=")
def _isAny_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._on_any, schemas.SearchEventOperator._is_any]
def _isUndefined_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._is_undefined]
def __is_negation_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._is_not,
schemas.SearchEventOperator._not_on,
schemas.SearchEventOperator._not_contains]
def _multiple_conditions(condition, values, value_key="value", is_not=False):
query = []
for i in range(len(values)):
k = f"{value_key}_{i}"
query.append(condition.replace(value_key, k))
return "(" + (" AND " if is_not else " OR ").join(query) + ")"
def get(error_id, family=False):
if family:
return get_batch([error_id])
@@ -263,10 +309,7 @@ def get_details(project_id, error_id, user_id, **data):
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE pe.error_id = fe.error_id
AND fe.user_id = %(userId)s), FALSE) AS favorite,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
@@ -420,8 +463,10 @@ def get_details_chart(project_id, error_id, user_id, **data):
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate"):
endTime_arg_name="endDate", type_condition=True):
ch_sub_query = ["project_id =toUInt32(%(project_id)s)"]
if type_condition:
ch_sub_query.append("event_type='ERROR'")
if time_constraint:
ch_sub_query += [f"datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"datetime < toDateTime(%({endTime_arg_name})s/1000)"]
@@ -465,214 +510,213 @@ def __get_basic_constraints_pg(platform=None, time_constraint=True, startTime_ar
return ch_sub_query
def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
empty_response = {'total': 0,
'errors': []
}
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startDate)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startDate)
platform = None
for f in data.filters:
if f.type == schemas.FilterType.platform and len(f.value) > 0:
platform = f.value[0]
pg_sub_query = __get_basic_constraints_pg(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints_pg(platform, time_constraint=False, chart=True, project_key=None)
# pg_sub_query_chart.append("source ='js_exception'")
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
if data.startDate is None:
data.startDate = TimeUTC.now(-30)
if data.endDate is None:
data.endDate = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0:
print("-- searching for sessions before errors")
# if favorite_only=True, search for sessions associated with favorite errors
statuses = sessions.search2_pg(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response
error_ids = [e["errorId"] for e in statuses]
with pg_client.PostgresClient() as cur:
if data.startDate is None:
data.startDate = TimeUTC.now(-7)
if data.endDate is None:
data.endDate = TimeUTC.now()
step_size = metrics_helper.__get_step_size(data.startDate, data.endDate, data.density, factor=1)
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = "DESC"
if data.order is not None:
order = data.order
extra_join = ""
params = {
"startDate": data.startDate,
"endDate": data.endDate,
"project_id": project_id,
"userId": user_id,
"step_size": step_size}
if data.status != schemas.ErrorStatus.all:
pg_sub_query.append("status = %(error_status)s")
params["error_status"] = data.status
if data.limit is not None and data.page is not None:
params["errors_offset"] = (data.page - 1) * data.limit
params["errors_limit"] = data.limit
else:
params["errors_offset"] = 0
params["errors_limit"] = 200
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
pg_sub_query.append("error_id IN %(error_ids)s")
if data.bookmarked:
pg_sub_query.append("ufe.user_id = %(userId)s")
extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
if data.query is not None and len(data.query) > 0:
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
params["error_query"] = helper.values_for_operator(value=data.query,
op=schemas.SearchEventOperator._contains)
main_pg_query = f"""SELECT full_count,
error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
chart
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_uuid) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(timestamp) AS max_datetime,
MIN(timestamp) AS min_datetime
FROM events.errors
INNER JOIN public.errors AS pe USING (error_id)
INNER JOIN public.sessions USING (session_id)
{extra_join}
WHERE {" AND ".join(pg_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}) AS details
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
) AS details
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if flows:
return {"count": total}
if total == 0:
rows = []
else:
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id, status, parent_error_id, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE errors.error_id = fe.error_id
AND fe.user_id = %(user_id)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE errors.error_id = ve.error_id
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"user_id": user_id})
cur.execute(query=query)
statuses = helper.list_to_camel_case(cur.fetchall())
statuses = {
s["errorId"]: s for s in statuses
}
for r in rows:
r.pop("full_count")
if r["error_id"] in statuses:
r["status"] = statuses[r["error_id"]]["status"]
r["parent_error_id"] = statuses[r["error_id"]]["parentErrorId"]
r["favorite"] = statuses[r["error_id"]]["favorite"]
r["viewed"] = statuses[r["error_id"]]["viewed"]
r["stack"] = format_first_stack_frame(statuses[r["error_id"]])["stack"]
else:
r["status"] = "untracked"
r["parent_error_id"] = None
r["favorite"] = False
r["viewed"] = False
r["stack"] = None
offset = len(rows)
rows = [r for r in rows if r["stack"] is None
or (len(r["stack"]) == 0 or len(r["stack"]) > 1
or len(r["stack"]) > 0
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
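# Note on the filtering above: rows whose only stack frame comes from a bare
# "Script error." (empty absPath) are dropped, and `offset` subtracts them
# from the reported total so pagination counts stay consistent.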
# refactor this function after clickhouse structure changes (missing search by query)
def search_deprecated(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
empty_response = {"data": {
'total': 0,
'errors': []
}}
platform = None
for f in data.filters:
if f.type == schemas.FilterType.platform and len(f.value) > 0:
platform = f.value[0]
ch_sub_query = __get_basic_constraints(platform)
ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
ch_sub_query = __get_basic_constraints(platform, type_condition=True)
ch_sub_query.append("source ='js_exception'")
# To ignore Script error
ch_sub_query.append("message!='Script error.'")
statuses = []
error_ids = None
# Clickhouse keeps data for the past month only, so no need to search beyond that
if data.startDate is None or data.startDate < TimeUTC.now(delta_days=-31):
data.startDate = TimeUTC.now(-30)
if data.startDate is None:
data.startDate = TimeUTC.now(-7)
if data.endDate is None:
data.endDate = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0 or data.status != schemas.ErrorStatus.all:
print("-- searching for sessions before errors")
# if favorite_only=True, search for sessions associated with favorite errors
statuses = sessions.search2_pg(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response
error_ids = [e["errorId"] for e in statuses]
with ch_client.ClickHouseClient() as ch, pg_client.PostgresClient() as cur:
if data.startDate is None:
data.startDate = TimeUTC.now(-7)
if data.endDate is None:
data.endDate = TimeUTC.now()
subquery_part = ""
params = {}
if len(data.events) > 0:
errors_condition_count = 0
for i, e in enumerate(data.events):
if e.type == schemas.EventType.error:
errors_condition_count += 1
is_any = _isAny_operator(e.operator)
op = __get_sql_operator(e.operator)
e_k = f"e_value{i}"
params = {**params, **_multiple_values(e.value, value_key=e_k)}
if not is_any and len(e.value) > 0 and e.value[0] not in [None, "*", ""]:
ch_sub_query.append(
_multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
e.value, value_key=e_k))
if len(data.events) > errors_condition_count:
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
errors_only=True,
project_id=project_id, user_id=user_id,
issue=None,
favorite_only=False)
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
params = {**params, **subquery_part_args}
if len(data.filters) > 0:
meta_keys = None
# include a sub-query of sessions inside the events query, in order to reduce the selected data
for i, f in enumerate(data.filters):
if not isinstance(f.value, list):
f.value = [f.value]
filter_type = f.type
f.value = helper.values_for_operator(value=f.value, op=f.operator)
f_k = f"f_value{i}"
params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
op = __get_sql_operator(f.operator) \
if filter_type not in [schemas.FilterType.events_count] else f.operator
is_any = _isAny_operator(f.operator)
is_undefined = _isUndefined_operator(f.operator)
if not is_any and not is_undefined and len(f.value) == 0:
continue
is_not = False
if __is_negation_operator(f.operator):
is_not = True
if filter_type == schemas.FilterType.user_browser:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_browser)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_os)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_device)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_country)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.utm_source]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_source)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_source)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.utm_medium]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_medium)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.utm_campaign]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_campaign)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f.value) > 0 and f.value[0] is not None:
ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
params["minDuration"] = f.value[0]
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
params["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
# extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
if is_any:
referrer_constraint = 'isNotNull(s.base_referrer)'
else:
referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
is_not=is_not, value_key=f_k)
elif filter_type == schemas.FilterType.metadata:
# get metadata list only if you need it
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
if f.source in meta_keys.keys():
if is_any:
ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
elif is_undefined:
ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
else:
ch_sessions_sub_query.append(
_multiple_conditions(
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.user_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.rev_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.rev_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.platform:
# op = __get_sql_operator(f.operator)
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
# elif filter_type == schemas.FilterType.issue:
# if is_any:
# ch_sessions_sub_query.append("notEmpty(s.issue_types)")
# else:
# ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
# # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
# # value_key=f_k))
#
# if is_not:
# extra_constraints[-1] = f"not({extra_constraints[-1]})"
# ss_constraints[-1] = f"not({ss_constraints[-1]})"
elif filter_type == schemas.FilterType.events_count:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
with ch_client.ClickHouseClient() as ch:
step_size = __get_step_size(data.startDate, data.endDate, data.density)
sort = __get_sort_key('datetime')
if data.sort is not None:
@@ -681,6 +725,7 @@ def search_deprecated(data: schemas.SearchErrorsSchema, project_id, user_id, flo
if data.order is not None:
order = data.order
params = {
**params,
"startDate": data.startDate,
"endDate": data.endDate,
"project_id": project_id,
@@ -692,118 +737,82 @@ def search_deprecated(data: schemas.SearchErrorsSchema, project_id, user_id, flo
else:
params["errors_offset"] = 0
params["errors_limit"] = 200
if data.bookmarked:
cur.execute(cur.mogrify(f"""SELECT error_id
FROM public.user_favorite_errors
WHERE user_id = %(userId)s
{"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
{"userId": user_id, "error_ids": tuple(error_ids or [])}))
error_ids = cur.fetchall()
if len(error_ids) == 0:
return empty_response
error_ids = [e["error_id"] for e in error_ids]
# if data.bookmarked:
# cur.execute(cur.mogrify(f"""SELECT error_id
# FROM public.user_favorite_errors
# WHERE user_id = %(userId)s
# {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
# {"userId": user_id, "error_ids": tuple(error_ids or [])}))
# error_ids = cur.fetchall()
# if len(error_ids) == 0:
# return empty_response
# error_ids = [e["error_id"] for e in error_ids]
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
ch_sub_query.append("error_id IN %(error_ids)s")
main_ch_query = f"""\
SELECT COUNT(DISTINCT error_id) AS count
FROM errors
WHERE {" AND ".join(ch_sub_query)};"""
# print("------------")
# print(ch.client().substitute_params(main_ch_query, params))
# print("------------")
total = ch.execute(query=main_ch_query, params=params)[0]["count"]
if flows:
return {"data": {"count": total}}
if total == 0:
rows = []
else:
main_ch_query = f"""\
SELECT details.error_id AS error_id, name, message, users, sessions, last_occurrence, first_occurrence, chart
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_uuid) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(datetime) AS max_datetime,
MIN(datetime) AS min_datetime
FROM errors
WHERE {" AND ".join(ch_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
INNER JOIN (SELECT error_id AS error_id, toUnixTimestamp(MAX(datetime))*1000 AS last_occurrence, toUnixTimestamp(MIN(datetime))*1000 AS first_occurrence
FROM errors
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
FROM (SELECT error_id, toUnixTimestamp(toStartOfInterval(datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM errors
WHERE {" AND ".join(ch_sub_query)}
GROUP BY error_id, timestamp
ORDER BY timestamp) AS sub_table
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
SELECT details.error_id AS error_id,
name, message, users, total, viewed,
sessions, last_occurrence, first_occurrence, chart
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT events.session_id) AS sessions,
MAX(datetime) AS max_datetime,
MIN(datetime) AS min_datetime,
COUNT(DISTINCT events.error_id) OVER() AS total,
any(isNotNull(viewed_error_id)) AS viewed
FROM {MAIN_EVENTS_TABLE} AS events
LEFT JOIN (SELECT error_id AS viewed_error_id
FROM {exp_ch_helper.get_user_viewed_errors_table()}
WHERE project_id=%(project_id)s
AND user_id=%(userId)s) AS viewed_errors ON(events.error_id=viewed_errors.viewed_error_id)
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
FROM {MAIN_SESSIONS_TABLE} AS s
{subquery_part}
WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
ON (events.session_id = sessions.session_id)
WHERE {" AND ".join(ch_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
INNER JOIN (SELECT error_id AS error_id,
toUnixTimestamp(MAX(datetime))*1000 AS last_occurrence,
toUnixTimestamp(MIN(datetime))*1000 AS first_occurrence
FROM {MAIN_EVENTS_TABLE}
WHERE project_id=%(project_id)s
AND event_type='ERROR'
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
FROM (SELECT error_id, toUnixTimestamp(toStartOfInterval(datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE}
WHERE {" AND ".join(ch_sub_query)}
GROUP BY error_id, timestamp
ORDER BY timestamp) AS sub_table
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
# print("------------")
# print(ch.client().substitute_params(main_ch_query, params))
# print("------------")
# print("------------")
# print(ch.format(main_ch_query, params))
# print("------------")
rows = ch.execute(query=main_ch_query, params=params)
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id, status, parent_error_id, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE errors.error_id = fe.error_id
AND fe.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE errors.error_id = ve.error_id
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"userId": user_id})
cur.execute(query=query)
statuses = helper.list_to_camel_case(cur.fetchall())
statuses = {
s["errorId"]: s for s in statuses
}
rows = ch.execute(query=main_ch_query, params=params)
total = rows[0]["total"] if len(rows) > 0 else 0
for r in rows:
if r["error_id"] in statuses:
r["status"] = statuses[r["error_id"]]["status"]
r["parent_error_id"] = statuses[r["error_id"]]["parentErrorId"]
r["favorite"] = statuses[r["error_id"]]["favorite"]
r["viewed"] = statuses[r["error_id"]]["viewed"]
r["stack"] = format_first_stack_frame(statuses[r["error_id"]])["stack"]
else:
r["status"] = "untracked"
r["parent_error_id"] = None
r["favorite"] = False
r["viewed"] = False
r["stack"] = None
r["chart"] = list(r["chart"])
for i in range(len(r["chart"])):
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
r["chart"] = metrics.__complete_missing_steps(rows=r["chart"], start_time=data.startDate,
end_time=data.endDate,
density=data.density, neutral={"count": 0})
offset = len(rows)
rows = [r for r in rows if r["stack"] is None
or (len(r["stack"]) == 0 or len(r["stack"]) > 1
or len(r["stack"]) > 0
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
"data": {
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
'total': total,
'errors': helper.list_to_camel_case(rows)
}

View file

@@ -0,0 +1,39 @@
from chalicelib.utils import pg_client
from chalicelib.core import errors_viewed_exp
def add_viewed_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
VALUES (%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
errors_viewed_exp.add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
def viewed_error_exists(user_id, error_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT
errors.error_id AS hydrated,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE ve.error_id = %(error_id)s
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE error_id = %(error_id)s""",
{"userId": user_id, "error_id": error_id})
cur.execute(
query=query
)
r = cur.fetchone()
if r:
return r.get("viewed")
return True
def viewed_error(project_id, user_id, error_id):
if viewed_error_exists(user_id=user_id, error_id=error_id):
return None
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
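# Flow sketch: viewed_error() first checks public.user_viewed_errors
# (treating a missing error as already viewed) and only then records the view
# in both Postgres and the experimental ClickHouse store (the ids below are
# illustrative):
#
#   viewed_error(project_id=1, user_id=7, error_id="e-123")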

Some files were not shown because too many files have changed in this diff