* feat(chalice): autocomplete return top 10 with stats

* fix(chalice): fixed autocomplete top 10 meta-filters

* refactor(chalice): pg client helper handles closed connection

* refactor(chalice): upgraded dependencies

* refactor(chalice): restricted get error's details
Kraiem Taha Yassine 2024-12-23 17:19:43 +01:00, committed by GitHub
parent 763aed14a1
commit 4c4e1b6580
15 changed files with 558 additions and 549 deletions


@@ -4,22 +4,21 @@ verify_ssl = true
name = "pypi"
[packages]
sqlparse = "==0.5.2"
urllib3 = "==2.2.3"
requests = "==2.32.3"
boto3 = "==1.35.76"
boto3 = "==1.35.86"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.9"
elasticsearch = "==8.16.0"
clickhouse-connect = "==0.8.11"
elasticsearch = "==8.17.0"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.6"
-uvicorn = {extras = ["standard"], version = "==0.32.1"}
+uvicorn = {extras = ["standard"], version = "==0.34.0"}
python-decouple = "==3.8"
-pydantic = {extras = ["email"], version = "==2.10.3"}
+pydantic = {extras = ["email"], version = "==2.10.4"}
apscheduler = "==3.11.0"
redis = "==5.2.1"


@@ -3,7 +3,6 @@ import json
import schemas
from chalicelib.core import sourcemaps
from chalicelib.core.errors.modules import sessions
-from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
@@ -13,9 +12,7 @@ def get(error_id, family=False):
if family:
return get_batch([error_id])
with pg_client.PostgresClient() as cur:
# trying: return only 1 error, without event details
query = cur.mogrify(
# "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
"SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
{"error_id": error_id})
cur.execute(query=query)
@@ -50,252 +47,6 @@ def get_batch(error_ids):
return helper.list_to_camel_case(errors)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = __get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30,
custom_tags
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
project_key="project_id"):


@@ -62,268 +62,6 @@ def get_batch(error_ids):
return errors_legacy.get_batch(error_ids=error_ids)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f"{o[0][0][0]}@{v[0]}",
"count": v[1]
} for o in data for v in o[2]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o[0][0][0],
"count": o[1][0][0],
# "versions": [{"version": v[0], "count": v[1]} for v in o[2]]
} for o in data
]
def __transform_map_to_tag(data, key1, key2, requested_key):
result = []
for i in data:
if requested_key == 0 and i.get(key1) is None and i.get(key2) is None:
result.append({"name": "all", "count": int(i.get("count"))})
elif requested_key == 1 and i.get(key1) is not None and i.get(key2) is None:
result.append({"name": i.get(key1), "count": int(i.get("count"))})
elif requested_key == 2 and i.get(key1) is not None and i.get(key2) is not None:
result.append({"name": i.get(key2), "count": int(i.get("count"))})
return result
def __flatten_sort_key_count(data):
if data is None:
return []
return [
{
"name": o[0][0][0],
"count": o[1][0][0]
} for o in data
]
def __process_tags_map(row):
browsers_partition = row.pop("browsers_partition")
os_partition = row.pop("os_partition")
device_partition = row.pop("device_partition")
country_partition = row.pop("country_partition")
return [
{"name": "browser",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=1)},
{"name": "browser.ver",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=2)},
{"name": "OS",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=1)
},
{"name": "OS.ver",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=2)},
{"name": "device.family",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=1)},
{"name": "device",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=2)},
{"name": "country", "partitions": __transform_map_to_tag(data=country_partition,
key1="country",
key2="",
requested_key=1)}
]
def get_details(project_id, error_id, user_id, **data):
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(0)
MAIN_ERR_SESS_TABLE = exp_ch_helper.get_main_js_errors_sessions_table(0)
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
ch_sub_query24 = __get_basic_constraints(startTime_arg_name="startDate24", endTime_arg_name="endDate24")
ch_sub_query24.append("error_id = %(error_id)s")
ch_sub_query30 = __get_basic_constraints(startTime_arg_name="startDate30", endTime_arg_name="endDate30",
project_key="errors.project_id")
ch_sub_query30.append("error_id = %(error_id)s")
ch_basic_query = __get_basic_constraints(time_constraint=False)
ch_basic_query.append("error_id = %(error_id)s")
with ch_client.ClickHouseClient() as ch:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_ch_query = f"""\
WITH pre_processed AS (SELECT error_id,
name,
message,
session_id,
datetime,
user_id,
user_browser,
user_browser_version,
user_os,
user_os_version,
user_device_type,
user_device,
user_country,
error_tags_keys,
error_tags_values
FROM {MAIN_ERR_SESS_TABLE} AS errors
WHERE {" AND ".join(ch_basic_query)}
)
SELECT %(error_id)s AS error_id, name, message,users,
first_occurrence,last_occurrence,last_session_id,
sessions,browsers_partition,os_partition,device_partition,
country_partition,chart24,chart30,custom_tags
FROM (SELECT error_id,
name,
message
FROM pre_processed
LIMIT 1) AS details
INNER JOIN (SELECT COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM pre_processed
WHERE datetime >= toDateTime(%(startDate30)s / 1000)
AND datetime <= toDateTime(%(endDate30)s / 1000)
) AS last_month_stats ON TRUE
INNER JOIN (SELECT toUnixTimestamp(max(datetime)) * 1000 AS last_occurrence,
toUnixTimestamp(min(datetime)) * 1000 AS first_occurrence
FROM pre_processed) AS time_details ON TRUE
INNER JOIN (SELECT session_id AS last_session_id,
arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) AS custom_tags
FROM pre_processed
ORDER BY datetime DESC
LIMIT 1) AS last_session_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS browsers_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_browser,''),toNullable('unknown')) AS browser,
coalesce(nullIf(user_browser_version,''),toNullable('unknown')) AS browser_version,
map('browser', browser,
'browser_version', browser_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(browser, browser_version)
ORDER BY browser nulls first, browser_version nulls first, count DESC) AS mapped_browser_details
) AS browser_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS os_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_os,''),toNullable('unknown')) AS os,
coalesce(nullIf(user_os_version,''),toNullable('unknown')) AS os_version,
map('os', os,
'os_version', os_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(os, os_version)
ORDER BY os nulls first, os_version nulls first, count DESC) AS mapped_os_details
) AS os_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS device_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_device,''),toNullable('unknown')) AS user_device,
map('device_type', toString(user_device_type),
'device', user_device,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(user_device_type, user_device)
ORDER BY user_device_type nulls first, user_device nulls first, count DESC
) AS count_per_device_details
) AS mapped_device_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS country_partition
FROM (SELECT COUNT(1) AS count,
map('country', toString(user_country),
'count', toString(count)) AS details
FROM pre_processed
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details
) AS mapped_country_details ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart24
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3756 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query24)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details24 ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart30
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3724 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query30)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details30 ON TRUE;"""
# print("--------------------")
# print(ch.format(main_ch_query, params))
# print("--------------------")
row = ch.execute(query=main_ch_query, parameters=params)
if len(row) == 0:
return {"errors": ["error not found"]}
row = row[0]
row["tags"] = __process_tags_map(row)
query = f"""SELECT session_id, toUnixTimestamp(datetime) * 1000 AS start_ts,
user_anonymous_id,user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, FALSE AS favorite, True AS viewed
FROM {MAIN_SESSIONS_TABLE} AS sessions
WHERE project_id = toUInt16(%(project_id)s)
AND session_id = %(session_id)s
ORDER BY datetime DESC
LIMIT 1;"""
params = {"project_id": project_id, "session_id": row["last_session_id"], "userId": user_id}
# print("--------------------")
# print(ch.format(query, params))
# print("--------------------")
status = ch.execute(query=query, parameters=params)
if status is not None:
status = status[0]
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["last_hydrated_session"] = None
row["favorite"] = False
row["viewed"] = False
row["chart24"] = metrics.__complete_missing_steps(start_time=data["startDate24"], end_time=data["endDate24"],
density=density24, rows=row["chart24"], neutral={"count": 0})
row["chart30"] = metrics.__complete_missing_steps(start_time=data["startDate30"], end_time=data["endDate30"],
density=density30, rows=row["chart30"], neutral={"count": 0})
return {"data": helper.dict_to_camel_case(row)}
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",


@@ -0,0 +1,253 @@
from chalicelib.core.errors import errors_legacy as errors
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
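# Illustrative sketch (sample data invented): how __flatten_sort_key_count_version
# reshapes the jsonb_agg partitions built by the query below.
#
# partition = [
#     {"name": "Chrome", "count": 120,
#      "partition": [{"version": "131", "count": 90}, {"version": "130", "count": 30}]},
#     {"name": "Firefox", "count": 40,
#      "partition": [{"version": "133", "count": 40}]},
# ]
# __flatten_sort_key_count_version(partition)
#   -> [{"name": "Chrome", "count": 120}, {"name": "Firefox", "count": 40}]
# __flatten_sort_key_count_version(partition, merge_nested=True)
#   -> [{"name": "Chrome@131", "count": 90}, {"name": "Firefox@133", "count": 40},
#       {"name": "Chrome@130", "count": 30}]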
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30,
custom_tags
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}


@@ -1,4 +1,4 @@
-from chalicelib.core import sourcemaps
+from chalicelib.core.sourcemaps import sourcemaps
def format_first_stack_frame(error):


@@ -147,7 +147,12 @@ class PostgresClient:
logger.error(f"!!! Error of type:{type(error)} while executing query:")
logger.error(query)
logger.info("starting rollback to allow future execution")
-self.connection.rollback()
+try:
+    self.connection.rollback()
+except psycopg2.InterfaceError as e:
+    logger.error("!!! Error while rolling back the connection: %s", e)
+    logger.error("!!! Trying to recreate the cursor")
+    self.recreate_cursor()
raise error
return result
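For context, a minimal standalone sketch of the recovery path added above, assuming a client shaped like this PostgresClient (recreate_cursor is the helper's own method referenced in the hunk; the psycopg2 and logging usage is standard):

import logging

import psycopg2

logger = logging.getLogger(__name__)

def safe_rollback(client):
    # Roll back the failed transaction; a connection that is already closed
    # raises psycopg2.InterfaceError, so rebuild the cursor instead of crashing.
    try:
        client.connection.rollback()
    except psycopg2.InterfaceError as e:
        logger.error("rollback failed on a closed connection: %s", e)
        client.recreate_cursor()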


@@ -1,19 +1,19 @@
urllib3==2.2.3
requests==2.32.3
-boto3==1.35.76
+boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
-clickhouse-connect==0.8.9
-elasticsearch==8.16.0
+clickhouse-connect==0.8.11
+elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
-uvicorn[standard]==0.32.1
+uvicorn[standard]==0.34.0
python-decouple==3.8
-pydantic[email]==2.10.3
+pydantic[email]==2.10.4
apscheduler==3.11.0


@@ -1,21 +1,21 @@
urllib3==2.2.3
requests==2.32.3
-boto3==1.35.76
+boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
-clickhouse-connect==0.8.9
-elasticsearch==8.16.0
+clickhouse-connect==0.8.11
+elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
-uvicorn[standard]==0.32.1
+uvicorn[standard]==0.34.0
python-decouple==3.8
-pydantic[email]==2.10.3
+pydantic[email]==2.10.4
apscheduler==3.11.0
redis==5.2.1


@@ -8,9 +8,9 @@ from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Re
import schemas
from chalicelib.core import scope
-from chalicelib.core import errors, assist, signup, feature_flags
+from chalicelib.core import assist, signup, feature_flags
from chalicelib.core.metrics import heatmaps
-from chalicelib.core.errors import errors_favorite, errors_viewed
+from chalicelib.core.errors import errors_favorite, errors_viewed, errors, errors_details
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.core import tenants, users, projects, license
@@ -331,8 +331,8 @@ def get_error_trace(projectId: int, sessionId: int, errorId: str,
@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
-data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
-**{"density24": density24, "density30": density30})
+data = errors_details.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
+**{"density24": density24, "density30": density30})
if data.get("data") is not None:
background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
error_id=errorId)
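The route's public contract is unchanged; a hedged sketch of a client call (host, project id, error id and token are invented):

import requests

resp = requests.get(
    "http://localhost:8000/1/errors/some-error-id",  # /{projectId}/errors/{errorId}
    params={"density24": 24, "density30": 30},
    headers={"Authorization": "Bearer <token>"},
)
print(resp.json()["data"]["tags"])  # get_details wraps its payload in {"data": ...}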

ee/api/.gitignore

@@ -293,3 +293,4 @@ Pipfile.lock
/chalicelib/core/errors/errors_ch.py
/chalicelib/core/errors/errors_favorite.py
/chalicelib/core/errors/errors_viewed.py
+/chalicelib/core/errors/errors_details.py


@@ -6,24 +6,24 @@ name = "pypi"
[packages]
urllib3 = "==2.2.3"
requests = "==2.32.3"
boto3 = "==1.35.76"
boto3 = "==1.35.86"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.9"
elasticsearch = "==8.16.0"
clickhouse-connect = "==0.8.11"
elasticsearch = "==8.17.0"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.6"
-uvicorn = {extras = ["standard"], version = "==0.32.1"}
+uvicorn = {extras = ["standard"], version = "==0.34.0"}
gunicorn = "==23.0.0"
python-decouple = "==3.8"
-pydantic = {extras = ["email"], version = "==2.10.3"}
+pydantic = {extras = ["email"], version = "==2.10.4"}
apscheduler = "==3.11.0"
redis = "==5.2.1"
python3-saml = "==1.16.0"
python-multipart = "==0.0.17"
python-multipart = "==0.0.20"
azure-storage-blob = "==12.24.0"
[dev-packages]


@@ -8,6 +8,7 @@ if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental error search")
from . import errors as errors_legacy
from . import errors_ch as errors
+from . import errors_details_exp as errors_details
else:
from . import errors
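The flag makes call sites implementation-agnostic: they import the stable aliases and the backend is chosen at startup (presumably the else branch gains a matching `from . import errors_details`, which this hunk does not show). A sketch of the caller side:

from chalicelib.core.errors import errors, errors_details

# Same call whether PostgreSQL or the experimental ClickHouse
# implementation sits behind the alias; ids here are invented.
details = errors_details.get_details(project_id=1, error_id="some-error-id", user_id=7)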


@@ -0,0 +1,261 @@
from decouple import config
import schemas
from . import errors
from chalicelib.core import metrics, metadata
from chalicelib.core import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f"{o[0][0][0]}@{v[0]}",
"count": v[1]
} for o in data for v in o[2]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o[0][0][0],
"count": o[1][0][0],
} for o in data
]
def __transform_map_to_tag(data, key1, key2, requested_key):
result = []
for i in data:
if requested_key == 0 and i.get(key1) is None and i.get(key2) is None:
result.append({"name": "all", "count": int(i.get("count"))})
elif requested_key == 1 and i.get(key1) is not None and i.get(key2) is None:
result.append({"name": i.get(key1), "count": int(i.get("count"))})
elif requested_key == 2 and i.get(key1) is not None and i.get(key2) is not None:
result.append({"name": i.get(key2), "count": int(i.get("count"))})
return result
def __process_tags_map(row):
browsers_partition = row.pop("browsers_partition")
os_partition = row.pop("os_partition")
device_partition = row.pop("device_partition")
country_partition = row.pop("country_partition")
return [
{"name": "browser",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=1)},
{"name": "browser.ver",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=2)},
{"name": "OS",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=1)
},
{"name": "OS.ver",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=2)},
{"name": "device.family",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=1)},
{"name": "device",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=2)},
{"name": "country", "partitions": __transform_map_to_tag(data=country_partition,
key1="country",
key2="",
requested_key=1)}
]
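# Illustrative sketch (sample counts invented): __transform_map_to_tag splits the
# GROUP BY ROLLUP output built by the ClickHouse query below into aggregation
# levels; rolled-up columns come back as None.
#
# rows = [
#     {"browser": None, "browser_version": None, "count": "160"},      # grand total
#     {"browser": "Chrome", "browser_version": None, "count": "120"},  # per browser
#     {"browser": "Chrome", "browser_version": "131", "count": "90"},
#     {"browser": "Firefox", "browser_version": None, "count": "40"},
#     {"browser": "Firefox", "browser_version": "133", "count": "40"},
# ]
# __transform_map_to_tag(rows, "browser", "browser_version", requested_key=1)
#   -> [{"name": "Chrome", "count": 120}, {"name": "Firefox", "count": 40}]
# __transform_map_to_tag(rows, "browser", "browser_version", requested_key=2)
#   -> [{"name": "131", "count": 90}, {"name": "133", "count": 40}]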
def get_details(project_id, error_id, user_id, **data):
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(0)
MAIN_ERR_SESS_TABLE = exp_ch_helper.get_main_js_errors_sessions_table(0)
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
ch_sub_query24 = errors.__get_basic_constraints(startTime_arg_name="startDate24", endTime_arg_name="endDate24")
ch_sub_query24.append("error_id = %(error_id)s")
ch_sub_query30 = errors.__get_basic_constraints(startTime_arg_name="startDate30", endTime_arg_name="endDate30",
project_key="errors.project_id")
ch_sub_query30.append("error_id = %(error_id)s")
ch_basic_query = errors.__get_basic_constraints(time_constraint=False)
ch_basic_query.append("error_id = %(error_id)s")
with ch_client.ClickHouseClient() as ch:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = errors.__get_step_size(data["startDate24"], data["endDate24"], density24)
density30 = int(data.get("density30", 30))
step_size30 = errors.__get_step_size(data["startDate30"], data["endDate30"], density30)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_ch_query = f"""\
WITH pre_processed AS (SELECT error_id,
name,
message,
session_id,
datetime,
user_id,
user_browser,
user_browser_version,
user_os,
user_os_version,
user_device_type,
user_device,
user_country,
error_tags_keys,
error_tags_values
FROM {MAIN_ERR_SESS_TABLE} AS errors
WHERE {" AND ".join(ch_basic_query)}
)
SELECT %(error_id)s AS error_id, name, message,users,
first_occurrence,last_occurrence,last_session_id,
sessions,browsers_partition,os_partition,device_partition,
country_partition,chart24,chart30,custom_tags
FROM (SELECT error_id,
name,
message
FROM pre_processed
LIMIT 1) AS details
INNER JOIN (SELECT COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM pre_processed
WHERE datetime >= toDateTime(%(startDate30)s / 1000)
AND datetime <= toDateTime(%(endDate30)s / 1000)
) AS last_month_stats ON TRUE
INNER JOIN (SELECT toUnixTimestamp(max(datetime)) * 1000 AS last_occurrence,
toUnixTimestamp(min(datetime)) * 1000 AS first_occurrence
FROM pre_processed) AS time_details ON TRUE
INNER JOIN (SELECT session_id AS last_session_id,
arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) AS custom_tags
FROM pre_processed
ORDER BY datetime DESC
LIMIT 1) AS last_session_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS browsers_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_browser,''),toNullable('unknown')) AS browser,
coalesce(nullIf(user_browser_version,''),toNullable('unknown')) AS browser_version,
map('browser', browser,
'browser_version', browser_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(browser, browser_version)
ORDER BY browser nulls first, browser_version nulls first, count DESC) AS mapped_browser_details
) AS browser_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS os_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_os,''),toNullable('unknown')) AS os,
coalesce(nullIf(user_os_version,''),toNullable('unknown')) AS os_version,
map('os', os,
'os_version', os_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(os, os_version)
ORDER BY os nulls first, os_version nulls first, count DESC) AS mapped_os_details
) AS os_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS device_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_device,''),toNullable('unknown')) AS user_device,
map('device_type', toString(user_device_type),
'device', user_device,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(user_device_type, user_device)
ORDER BY user_device_type nulls first, user_device nulls first, count DESC
) AS count_per_device_details
) AS mapped_device_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS country_partition
FROM (SELECT COUNT(1) AS count,
map('country', toString(user_country),
'count', toString(count)) AS details
FROM pre_processed
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details
) AS mapped_country_details ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart24
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3756 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query24)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details24 ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart30
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3724 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query30)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details30 ON TRUE;"""
# print("--------------------")
# print(ch.format(main_ch_query, params))
# print("--------------------")
row = ch.execute(query=main_ch_query, parameters=params)
if len(row) == 0:
return {"errors": ["error not found"]}
row = row[0]
row["tags"] = __process_tags_map(row)
query = f"""SELECT session_id, toUnixTimestamp(datetime) * 1000 AS start_ts,
user_anonymous_id,user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, FALSE AS favorite, True AS viewed
FROM {MAIN_SESSIONS_TABLE} AS sessions
WHERE project_id = toUInt16(%(project_id)s)
AND session_id = %(session_id)s
ORDER BY datetime DESC
LIMIT 1;"""
params = {"project_id": project_id, "session_id": row["last_session_id"], "userId": user_id}
# print("--------------------")
# print(ch.format(query, params))
# print("--------------------")
status = ch.execute(query=query, parameters=params)
if status is not None and len(status) > 0:
status = status[0]
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["last_hydrated_session"] = None
row["favorite"] = False
row["viewed"] = False
row["chart24"] = metrics.__complete_missing_steps(start_time=data["startDate24"], end_time=data["endDate24"],
density=density24, rows=row["chart24"], neutral={"count": 0})
row["chart30"] = metrics.__complete_missing_steps(start_time=data["startDate30"], end_time=data["endDate30"],
density=density30, rows=row["chart30"], neutral={"count": 0})
return {"data": helper.dict_to_camel_case(row)}


@@ -113,3 +113,4 @@ rm -rf /chalicelib/core/errors/errors.py
rm -rf /chalicelib/core/errors/errors_ch.py
rm -rf /chalicelib/core/errors/errors_favorite.py
rm -rf /chalicelib/core/errors/errors_viewed.py
+rm -rf /chalicelib/core/errors/errors_details.py


@@ -1,31 +1,30 @@
urllib3==2.2.3
requests==2.32.3
-boto3==1.35.76
+boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
-clickhouse-connect==0.8.9
-elasticsearch==8.16.0
+clickhouse-connect==0.8.11
+elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
-uvicorn[standard]==0.32.1
+uvicorn[standard]==0.34.0
gunicorn==23.0.0
python-decouple==3.8
-pydantic[email]==2.10.3
+pydantic[email]==2.10.4
apscheduler==3.11.0
redis==5.2.1
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
-python3-saml==1.16.0
---no-binary=lxml
-python-multipart==0.0.18
+python3-saml==1.16.0 --no-binary=lxml
+python-multipart==0.0.20
#confluent-kafka==2.1.0
azure-storage-blob==12.24.0