Merge branch 'dev' into live-se-red

nick-delirium 2024-12-23 17:50:06 +01:00
commit 48038b4fc6
No known key found for this signature in database
GPG key ID: 93ABD695DF5FDBA0
34 changed files with 1013 additions and 971 deletions

View file

@@ -4,22 +4,21 @@ verify_ssl = true
name = "pypi"
[packages]
sqlparse = "==0.5.2"
urllib3 = "==2.2.3"
requests = "==2.32.3"
boto3 = "==1.35.76"
boto3 = "==1.35.86"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.9"
elasticsearch = "==8.16.0"
clickhouse-connect = "==0.8.11"
elasticsearch = "==8.17.0"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.6"
uvicorn = {extras = ["standard"], version = "==0.32.1"}
uvicorn = {extras = ["standard"], version = "==0.34.0"}
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.3"}
pydantic = {extras = ["email"], version = "==2.10.4"}
apscheduler = "==3.11.0"
redis = "==5.2.1"

View file

@@ -3,7 +3,6 @@ import json
import schemas
from chalicelib.core import sourcemaps
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
@@ -13,9 +12,7 @@ def get(error_id, family=False):
if family:
return get_batch([error_id])
with pg_client.PostgresClient() as cur:
# trying: return only 1 error, without event details
query = cur.mogrify(
# "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
"SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
{"error_id": error_id})
cur.execute(query=query)
@@ -50,252 +47,6 @@ def get_batch(error_ids):
return helper.list_to_camel_case(errors)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
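Note: the PG partitions arrive as nested dicts; a minimal sketch with hypothetical sample data shows both modes of __flatten_sort_key_count_version:
# Hypothetical sample shaped like a browsers_partition row:
data = [
    {"name": "Chrome", "count": 10,
     "partition": [{"version": "120", "count": 7}, {"version": "119", "count": 3}]},
    {"name": "Firefox", "count": 4,
     "partition": [{"version": "121", "count": 4}]},
]
# merge_nested=False keeps the top-level names:
#   [{"name": "Chrome", "count": 10}, {"name": "Firefox", "count": 4}]
# merge_nested=True flattens to name@version pairs, sorted by count desc:
#   [{"name": "Chrome@120", "count": 7}, {"name": "Firefox@121", "count": 4},
#    {"name": "Chrome@119", "count": 3}]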
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = __get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30,
custom_tags
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
project_key="project_id"):
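The hunk cuts off at this signature. Judging only from the call sites above, a hedged sketch of its contract (the conditions and names are inferred, not the actual body; platform handling is omitted):
def __get_basic_constraints(platform=None, time_constraint=True,
                            startTime_arg_name="startDate", endTime_arg_name="endDate",
                            chart=False, step_size_name="step_size",
                            project_key="project_id"):
    # Inferred: returns a list of SQL condition strings that callers
    # join with " AND " and extend with error_id / source filters.
    constraints = [f"{project_key} = %(project_id)s"]
    if time_constraint:
        constraints += [f"timestamp >= %({startTime_arg_name})s",
                        f"timestamp <= %({endTime_arg_name})s"]
    if chart:
        constraints += ["timestamp >= generated_timestamp",
                        f"timestamp < generated_timestamp + %({step_size_name})s"]
    return constraints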

View file

@@ -62,268 +62,6 @@ def get_batch(error_ids):
return errors_legacy.get_batch(error_ids=error_ids)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f"{o[0][0][0]}@{v[0]}",
"count": v[1]
} for o in data for v in o[2]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o[0][0][0],
"count": o[1][0][0],
# "versions": [{"version": v[0], "count": v[1]} for v in o[2]]
} for o in data
]
def __transform_map_to_tag(data, key1, key2, requested_key):
result = []
for i in data:
if requested_key == 0 and i.get(key1) is None and i.get(key2) is None:
result.append({"name": "all", "count": int(i.get("count"))})
elif requested_key == 1 and i.get(key1) is not None and i.get(key2) is None:
result.append({"name": i.get(key1), "count": int(i.get("count"))})
elif requested_key == 2 and i.get(key1) is not None and i.get(key2) is not None:
result.append({"name": i.get(key2), "count": int(i.get("count"))})
return result
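A hypothetical illustration of the ROLLUP-shaped map rows this consumes; requested_key selects which rollup level survives:
# ROLLUP emits a grand-total row (both keys NULL), per-browser subtotals
# (version NULL), and full detail rows; requested_key picks one level.
rows = [
    {"browser": None, "browser_version": None, "count": "14"},  # grand total
    {"browser": "Chrome", "browser_version": None, "count": "10"},
    {"browser": "Chrome", "browser_version": "120", "count": "7"},
    {"browser": "Chrome", "browser_version": "119", "count": "3"},
]
# requested_key=0 -> [{"name": "all", "count": 14}]
# requested_key=1 -> [{"name": "Chrome", "count": 10}]
# requested_key=2 -> [{"name": "120", "count": 7}, {"name": "119", "count": 3}]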
def __flatten_sort_key_count(data):
if data is None:
return []
return [
{
"name": o[0][0][0],
"count": o[1][0][0]
} for o in data
]
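The triple indexing is easiest to read against a hypothetical tuple-nested row of the kind the legacy ClickHouse driver returned:
# o[0][0][0] unwraps the name, o[1][0][0] the count:
data = [((("Chrome",),), ((10,),)), ((("Firefox",),), ((4,),))]
# -> [{"name": "Chrome", "count": 10}, {"name": "Firefox", "count": 4}]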
def __process_tags_map(row):
browsers_partition = row.pop("browsers_partition")
os_partition = row.pop("os_partition")
device_partition = row.pop("device_partition")
country_partition = row.pop("country_partition")
return [
{"name": "browser",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=1)},
{"name": "browser.ver",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=2)},
{"name": "OS",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=1)
},
{"name": "OS.ver",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=2)},
{"name": "device.family",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=1)},
{"name": "device",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=2)},
{"name": "country", "partitions": __transform_map_to_tag(data=country_partition,
key1="country",
key2="",
requested_key=1)}
]
def get_details(project_id, error_id, user_id, **data):
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(0)
MAIN_ERR_SESS_TABLE = exp_ch_helper.get_main_js_errors_sessions_table(0)
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
ch_sub_query24 = __get_basic_constraints(startTime_arg_name="startDate24", endTime_arg_name="endDate24")
ch_sub_query24.append("error_id = %(error_id)s")
ch_sub_query30 = __get_basic_constraints(startTime_arg_name="startDate30", endTime_arg_name="endDate30",
project_key="errors.project_id")
ch_sub_query30.append("error_id = %(error_id)s")
ch_basic_query = __get_basic_constraints(time_constraint=False)
ch_basic_query.append("error_id = %(error_id)s")
with ch_client.ClickHouseClient() as ch:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_ch_query = f"""\
WITH pre_processed AS (SELECT error_id,
name,
message,
session_id,
datetime,
user_id,
user_browser,
user_browser_version,
user_os,
user_os_version,
user_device_type,
user_device,
user_country,
error_tags_keys,
error_tags_values
FROM {MAIN_ERR_SESS_TABLE} AS errors
WHERE {" AND ".join(ch_basic_query)}
)
SELECT %(error_id)s AS error_id, name, message,users,
first_occurrence,last_occurrence,last_session_id,
sessions,browsers_partition,os_partition,device_partition,
country_partition,chart24,chart30,custom_tags
FROM (SELECT error_id,
name,
message
FROM pre_processed
LIMIT 1) AS details
INNER JOIN (SELECT COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM pre_processed
WHERE datetime >= toDateTime(%(startDate30)s / 1000)
AND datetime <= toDateTime(%(endDate30)s / 1000)
) AS last_month_stats ON TRUE
INNER JOIN (SELECT toUnixTimestamp(max(datetime)) * 1000 AS last_occurrence,
toUnixTimestamp(min(datetime)) * 1000 AS first_occurrence
FROM pre_processed) AS time_details ON TRUE
INNER JOIN (SELECT session_id AS last_session_id,
arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) AS custom_tags
FROM pre_processed
ORDER BY datetime DESC
LIMIT 1) AS last_session_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS browsers_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_browser,''),toNullable('unknown')) AS browser,
coalesce(nullIf(user_browser_version,''),toNullable('unknown')) AS browser_version,
map('browser', browser,
'browser_version', browser_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(browser, browser_version)
ORDER BY browser nulls first, browser_version nulls first, count DESC) AS mapped_browser_details
) AS browser_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS os_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_os,''),toNullable('unknown')) AS os,
coalesce(nullIf(user_os_version,''),toNullable('unknown')) AS os_version,
map('os', os,
'os_version', os_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(os, os_version)
ORDER BY os nulls first, os_version nulls first, count DESC) AS mapped_os_details
) AS os_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS device_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_device,''),toNullable('unknown')) AS user_device,
map('device_type', toString(user_device_type),
'device', user_device,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(user_device_type, user_device)
ORDER BY user_device_type nulls first, user_device nulls first, count DESC
) AS count_per_device_details
) AS mapped_device_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS country_partition
FROM (SELECT COUNT(1) AS count,
map('country', toString(user_country),
'count', toString(count)) AS details
FROM pre_processed
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details
) AS mapped_country_details ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart24
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3756 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query24)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details24 ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart30
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3724 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query30)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details30 ON TRUE;"""
# print("--------------------")
# print(ch.format(main_ch_query, params))
# print("--------------------")
row = ch.execute(query=main_ch_query, parameters=params)
if len(row) == 0:
return {"errors": ["error not found"]}
row = row[0]
row["tags"] = __process_tags_map(row)
query = f"""SELECT session_id, toUnixTimestamp(datetime) * 1000 AS start_ts,
user_anonymous_id,user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, FALSE AS favorite, True AS viewed
FROM {MAIN_SESSIONS_TABLE} AS sessions
WHERE project_id = toUInt16(%(project_id)s)
AND session_id = %(session_id)s
ORDER BY datetime DESC
LIMIT 1;"""
params = {"project_id": project_id, "session_id": row["last_session_id"], "userId": user_id}
# print("--------------------")
# print(ch.format(query, params))
# print("--------------------")
status = ch.execute(query=query, parameters=params)
if status is not None:
status = status[0]
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["last_hydrated_session"] = None
row["favorite"] = False
row["viewed"] = False
row["chart24"] = metrics.__complete_missing_steps(start_time=data["startDate24"], end_time=data["endDate24"],
density=density24, rows=row["chart24"], neutral={"count": 0})
row["chart30"] = metrics.__complete_missing_steps(start_time=data["startDate30"], end_time=data["endDate30"],
density=density30, rows=row["chart30"], neutral={"count": 0})
return {"data": helper.dict_to_camel_case(row)}
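ClickHouse only returns buckets that contain events, so the two charts are padded afterwards. A hedged sketch of what metrics.__complete_missing_steps is assumed to do (the real helper may align buckets differently):
def complete_missing_steps(start_time, end_time, density, rows, neutral):
    # Assumed behavior: generate `density` evenly spaced buckets and fill
    # any timestamp missing from `rows` with the neutral payload.
    step = (end_time - start_time) // density
    present = {r["timestamp"]: r for r in rows}
    return [present.get(start_time + i * step,
                        {"timestamp": start_time + i * step, **neutral})
            for i in range(density)]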
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",

View file

@@ -0,0 +1,253 @@
from chalicelib.core.errors import errors_legacy as errors
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30,
custom_tags
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}

View file

@@ -7,7 +7,7 @@ import schemas
from chalicelib.core import issues
from chalicelib.core.errors import errors
from chalicelib.core.metrics import heatmaps, product_analytics, funnels, custom_metrics_predefined
from chalicelib.core.sessions import sessions
from chalicelib.core.sessions import sessions, sessions_search
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@@ -52,7 +52,7 @@ def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.
"total": 0,
"sessions": []
}
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
return sessions_search.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
@@ -174,7 +174,7 @@ def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSe
results = []
for s in data.series:
results.append({"seriesId": s.series_id, "seriesName": s.name,
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
**sessions_search.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results
@@ -189,7 +189,7 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
results.append({"seriesId": None, "seriesName": s.name,
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
**sessions_search.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results

View file

@@ -1,4 +1,4 @@
from chalicelib.core import sourcemaps
from chalicelib.core.sourcemaps import sourcemaps
def format_first_stack_frame(error):

View file

@@ -147,7 +147,12 @@ class PostgresClient:
logger.error(f"!!! Error of type:{type(error)} while executing query:")
logger.error(query)
logger.info("starting rollback to allow future execution")
self.connection.rollback()
try:
self.connection.rollback()
except psycopg2.InterfaceError as e:
logger.error("!!! Error while rollbacking connection", e)
logger.error("!!! Trying to recreate the cursor")
self.recreate_cursor()
raise error
return result
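For context: psycopg2 raises InterfaceError when rollback() hits a connection the server has already closed, so the new guard falls back to rebuilding the cursor. A minimal standalone sketch of the same pattern (recreate_cursor is assumed to reconnect and hand back a fresh cursor):
import logging
import psycopg2

logger = logging.getLogger(__name__)

def safe_rollback(connection, recreate_cursor):
    try:
        connection.rollback()
    except psycopg2.InterfaceError as e:
        # the connection is dead; rollback is impossible, reconnect instead
        logger.error("rollback failed on a dead connection: %s", e)
        recreate_cursor()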

View file

@@ -1,19 +1,19 @@
urllib3==2.2.3
requests==2.32.3
boto3==1.35.76
boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.9
elasticsearch==8.16.0
clickhouse-connect==0.8.11
elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
uvicorn[standard]==0.32.1
uvicorn[standard]==0.34.0
python-decouple==3.8
pydantic[email]==2.10.3
pydantic[email]==2.10.4
apscheduler==3.11.0

View file

@@ -1,21 +1,21 @@
urllib3==2.2.3
requests==2.32.3
boto3==1.35.76
boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.9
elasticsearch==8.16.0
clickhouse-connect==0.8.11
elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
uvicorn[standard]==0.32.1
uvicorn[standard]==0.34.0
python-decouple==3.8
pydantic[email]==2.10.3
pydantic[email]==2.10.4
apscheduler==3.11.0
redis==5.2.1

View file

@@ -8,9 +8,9 @@ from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Re
import schemas
from chalicelib.core import scope
from chalicelib.core import errors, assist, signup, feature_flags
from chalicelib.core import assist, signup, feature_flags
from chalicelib.core.metrics import heatmaps
from chalicelib.core.errors import errors_favorite, errors_viewed
from chalicelib.core.errors import errors_favorite, errors_viewed, errors, errors_details
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.core import tenants, users, projects, license
@@ -331,8 +331,8 @@ def get_error_trace(projectId: int, sessionId: int, errorId: str,
@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
**{"density24": density24, "density30": density30})
data = errors_details.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
**{"density24": density24, "density30": density30})
if data.get("data") is not None:
background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
error_id=errorId)

View file

@@ -37,7 +37,7 @@ func main() {
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(api.NoPrefix, builder.AnalyticsAPI)
router.AddHandlers(api.NoPrefix, builder.CardsAPI, builder.DashboardsAPI, builder.ChartsAPI)
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
server.Run(ctx, log, &cfg.HTTP, router)

View file

@@ -1,92 +0,0 @@
package api
import (
"time"
)
// CardBase Common fields for the Card entity
type CardBase struct {
Name string `json:"name" validate:"required"`
IsPublic bool `json:"isPublic" validate:"omitempty"`
DefaultConfig map[string]any `json:"defaultConfig"`
Thumbnail *string `json:"thumbnail" validate:"omitempty,url"`
MetricType string `json:"metricType" validate:"required,oneof=timeseries table funnel"`
MetricOf string `json:"metricOf" validate:"required,oneof=session_count user_count"`
MetricFormat string `json:"metricFormat" validate:"required,oneof=default percentage"`
ViewType string `json:"viewType" validate:"required,oneof=line_chart table_view"`
MetricValue []string `json:"metricValue" validate:"omitempty"`
SessionID *int64 `json:"sessionId" validate:"omitempty"`
Series []CardSeries `json:"series" validate:"required,dive"`
}
// Card Fields specific to database operations
type Card struct {
CardBase
ProjectID int64 `json:"projectId" validate:"required"`
UserID int64 `json:"userId" validate:"required"`
CardID int64 `json:"cardId"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt *time.Time `json:"deleted_at,omitempty"`
EditedAt *time.Time `json:"edited_at,omitempty"`
}
type CardSeries struct {
SeriesID int64 `json:"seriesId" validate:"omitempty"`
MetricID int64 `json:"metricId" validate:"omitempty"`
Name string `json:"name" validate:"required"`
CreatedAt time.Time `json:"createdAt" validate:"omitempty"`
DeletedAt *time.Time `json:"deletedAt" validate:"omitempty"`
Index int64 `json:"index" validate:"required"`
Filter SeriesFilter `json:"filter"`
}
type SeriesFilter struct {
EventOrder string `json:"eventOrder" validate:"required,oneof=then or and"`
Filters []FilterItem `json:"filters"`
}
type FilterItem struct {
Type string `json:"type" validate:"required"`
Operator string `json:"operator" validate:"required"`
Source string `json:"source" validate:"required"`
SourceOperator string `json:"sourceOperator" validate:"required"`
Value []string `json:"value" validate:"required,dive,required"`
IsEvent bool `json:"isEvent"`
}
// CardCreateRequest Fields required for creating a card (from the frontend)
type CardCreateRequest struct {
CardBase
}
type CardGetResponse struct {
Card
}
type CardUpdateRequest struct {
CardBase
}
type GetCardsResponse struct {
Cards []Card `json:"cards"`
Total int64 `json:"total"`
}
type DataPoint struct {
Timestamp int64 `json:"timestamp"`
Series map[string]int64 `json:"series"`
}
type GetCardChartDataRequest struct {
ProjectID int64 `json:"projectId" validate:"required"`
MetricType string `json:"metricType" validate:"required,oneof=timeseries table funnel"`
MetricOf string `json:"metricOf" validate:"required,oneof=session_count user_count"`
MetricFormat string `json:"metricFormat" validate:"required,oneof=default percentage"`
SessionID int64 `json:"sessionId" validate:"required"`
Series []CardSeries `json:"series"`
}
type GetCardChartDataResponse struct {
Data []DataPoint `json:"data"`
}

View file

@@ -1,64 +0,0 @@
package api
import (
"fmt"
"net/http"
"strconv"
"github.com/gorilla/mux"
config "openreplay/backend/internal/config/analytics"
"openreplay/backend/pkg/analytics/service"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
)
type handlersImpl struct {
log logger.Logger
responser *api.Responser
jsonSizeLimit int64
service service.Service
}
func (e *handlersImpl) GetAll() []*api.Description {
return []*api.Description{
{"/v1/analytics/{projectId}/dashboards", e.createDashboard, "POST"},
{"/v1/analytics/{projectId}/dashboards", e.getDashboards, "GET"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.getDashboard, "GET"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.updateDashboard, "PUT"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.deleteDashboard, "DELETE"},
{"/v1/analytics/{projectId}/dashboards/{id}/cards", e.addCardToDashboard, "POST"},
{"/v1/analytics/{projectId}/dashboards/{id}/cards/{cardId}", e.removeCardFromDashboard, "DELETE"},
{"/v1/analytics/{projectId}/cards", e.createCard, "POST"},
{"/v1/analytics/{projectId}/cards", e.getCardsPaginated, "GET"},
{"/v1/analytics/{projectId}/cards/{id}", e.getCard, "GET"},
{"/v1/analytics/{projectId}/cards/{id}", e.updateCard, "PUT"},
{"/v1/analytics/{projectId}/cards/{id}", e.deleteCard, "DELETE"},
{"/v1/analytics/{projectId}/cards/{id}/chart", e.getCardChartData, "POST"},
{"/v1/analytics/{projectId}/cards/{id}/try", e.getCardChartData, "POST"},
}
}
func NewHandlers(log logger.Logger, cfg *config.Config, responser *api.Responser, service service.Service) (api.Handlers, error) {
return &handlersImpl{
log: log,
responser: responser,
jsonSizeLimit: cfg.JsonSizeLimit,
service: service,
}, nil
}
func getIDFromRequest(r *http.Request, key string) (int, error) {
vars := mux.Vars(r)
idStr := vars[key]
if idStr == "" {
return 0, fmt.Errorf("missing %s in request", key)
}
id, err := strconv.Atoi(idStr)
if err != nil {
return 0, fmt.Errorf("invalid %s format", key)
}
return id, nil
}

View file

@@ -1,11 +1,12 @@
package analytics
import (
"openreplay/backend/pkg/analytics/charts"
"time"
"openreplay/backend/internal/config/analytics"
analyticsAPI "openreplay/backend/pkg/analytics/api"
"openreplay/backend/pkg/analytics/service"
"openreplay/backend/pkg/analytics/cards"
"openreplay/backend/pkg/analytics/dashboards"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/web"
@@ -16,34 +17,50 @@ import (
)
type ServicesBuilder struct {
Auth auth.Auth
RateLimiter *limiter.UserRateLimiter
AuditTrail tracer.Tracer
AnalyticsAPI api.Handlers
Auth auth.Auth
RateLimiter *limiter.UserRateLimiter
AuditTrail tracer.Tracer
CardsAPI api.Handlers
DashboardsAPI api.Handlers
ChartsAPI api.Handlers
}
func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
responser := api.NewResponser(webMetrics)
audiTrail, err := tracer.NewTracer(log, pgconn)
if err != nil {
return nil, err
}
analyticsService, err := service.NewService(log, pgconn)
cardsService, err := cards.New(log, pgconn)
if err != nil {
return nil, err
}
handlers, err := analyticsAPI.NewHandlers(log, cfg, responser, analyticsService)
cardsHandlers, err := cards.NewHandlers(log, cfg, responser, cardsService)
if err != nil {
return nil, err
}
dashboardsService, err := dashboards.New(log, pgconn)
if err != nil {
return nil, err
}
dashboardsHandlers, err := dashboards.NewHandlers(log, cfg, responser, dashboardsService)
if err != nil {
return nil, err
}
chartsService, err := charts.New(log, pgconn)
if err != nil {
return nil, err
}
chartsHandlers, err := charts.NewHandlers(log, cfg, responser, chartsService)
if err != nil {
return nil, err
}
return &ServicesBuilder{
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: audiTrail,
AnalyticsAPI: handlers,
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: audiTrail,
CardsAPI: cardsHandlers,
DashboardsAPI: dashboardsHandlers,
ChartsAPI: chartsHandlers,
}, nil
}

View file

@@ -1,16 +1,41 @@
package service
package cards
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/jackc/pgx/v4"
"github.com/lib/pq"
"openreplay/backend/pkg/analytics/api/models"
"strings"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
func (s *serviceImpl) CreateCard(projectId int, userID uint64, req *models.CardCreateRequest) (*models.CardGetResponse, error) {
type Cards interface {
Create(projectId int, userId uint64, req *CardCreateRequest) (*CardGetResponse, error)
Get(projectId int, cardId int) (*CardGetResponse, error)
GetWithSeries(projectId int, cardId int) (*CardGetResponse, error)
GetAll(projectId int) (*GetCardsResponse, error)
GetAllPaginated(projectId int, filters CardListFilter, sort CardListSort, limit int, offset int) (*GetCardsResponsePaginated, error)
Update(projectId int, cardId int64, userId uint64, req *CardUpdateRequest) (*CardGetResponse, error)
Delete(projectId int, cardId int64, userId uint64) error
}
type cardsImpl struct {
log logger.Logger
pgconn pool.Pool
}
func New(log logger.Logger, conn pool.Pool) (Cards, error) {
return &cardsImpl{
log: log,
pgconn: conn,
}, nil
}
func (s *cardsImpl) Create(projectId int, userID uint64, req *CardCreateRequest) (*CardGetResponse, error) {
if req.MetricValue == nil {
req.MetricValue = []string{}
}
@@ -41,7 +66,7 @@ func (s *serviceImpl) CreateCard(projectId int, userID uint64, req *models.CardC
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
RETURNING metric_id, project_id, user_id, name, metric_type, view_type, metric_of, metric_value, metric_format, is_public, created_at, edited_at`
card := &models.CardGetResponse{}
card := &CardGetResponse{}
err = tx.QueryRow(
ctx, sql,
projectId, userID, req.Name, req.MetricType, req.ViewType, req.MetricOf, req.MetricValue, req.MetricFormat, req.IsPublic,
@@ -73,7 +98,7 @@ func (s *serviceImpl) CreateCard(projectId int, userID uint64, req *models.CardC
return card, nil
}
func (s *serviceImpl) CreateSeries(ctx context.Context, tx pgx.Tx, metricId int64, series []models.CardSeriesBase) []models.CardSeries {
func (s *cardsImpl) CreateSeries(ctx context.Context, tx pgx.Tx, metricId int64, series []CardSeriesBase) []CardSeries {
if len(series) == 0 {
return nil // No series to create
}
@@ -114,9 +139,9 @@ func (s *serviceImpl) CreateSeries(ctx context.Context, tx pgx.Tx, metricId int6
}
// Collect inserted series
var seriesList []models.CardSeries
var seriesList []CardSeries
for rows.Next() {
cardSeries := models.CardSeries{}
cardSeries := CardSeries{}
if err := rows.Scan(&cardSeries.SeriesID, &cardSeries.MetricID, &cardSeries.Name, &cardSeries.Index, &cardSeries.Filter); err != nil {
s.log.Error(ctx, "failed to scan series: %v", err)
continue
@@ -127,13 +152,13 @@ func (s *serviceImpl) CreateSeries(ctx context.Context, tx pgx.Tx, metricId int6
return seriesList
}
func (s *serviceImpl) GetCard(projectId int, cardID int) (*models.CardGetResponse, error) {
func (s *cardsImpl) Get(projectId int, cardID int) (*CardGetResponse, error) {
sql :=
`SELECT metric_id, project_id, user_id, name, metric_type, view_type, metric_of, metric_value, metric_format, is_public, created_at, edited_at
FROM public.metrics
WHERE metric_id = $1 AND project_id = $2 AND deleted_at IS NULL`
card := &models.CardGetResponse{}
card := &CardGetResponse{}
err := s.pgconn.QueryRow(sql, cardID, projectId).Scan(
&card.CardID, &card.ProjectID, &card.UserID, &card.Name, &card.MetricType, &card.ViewType, &card.MetricOf, &card.MetricValue, &card.MetricFormat, &card.IsPublic, &card.CreatedAt, &card.EditedAt,
)
@@ -145,7 +170,7 @@ func (s *serviceImpl) GetCard(projectId int, cardID int) (*models.CardGetRespons
return card, nil
}
func (s *serviceImpl) GetCardWithSeries(projectId int, cardID int) (*models.CardGetResponse, error) {
func (s *cardsImpl) GetWithSeries(projectId int, cardID int) (*CardGetResponse, error) {
sql := `
SELECT m.metric_id, m.project_id, m.user_id, m.name, m.metric_type, m.view_type, m.metric_of,
m.metric_value, m.metric_format, m.is_public, m.created_at, m.edited_at,
@@ -166,7 +191,7 @@ func (s *serviceImpl) GetCardWithSeries(projectId int, cardID int) (*models.Card
m.metric_of, m.metric_value, m.metric_format, m.is_public, m.created_at, m.edited_at
`
card := &models.CardGetResponse{}
card := &CardGetResponse{}
var seriesJSON []byte
err := s.pgconn.QueryRow(sql, cardID, projectId).Scan(
&card.CardID, &card.ProjectID, &card.UserID, &card.Name, &card.MetricType, &card.ViewType, &card.MetricOf,
@@ -184,7 +209,7 @@ func (s *serviceImpl) GetCardWithSeries(projectId int, cardID int) (*models.Card
return card, nil
}
func (s *serviceImpl) GetCards(projectId int) (*models.GetCardsResponse, error) {
func (s *cardsImpl) GetAll(projectId int) (*GetCardsResponse, error) {
sql := `
SELECT metric_id, project_id, user_id, name, metric_type, view_type, metric_of, metric_value, metric_format, is_public, created_at, edited_at
FROM public.metrics
@@ -196,9 +221,9 @@ func (s *serviceImpl) GetCards(projectId int) (*models.GetCardsResponse, error)
}
defer rows.Close()
cards := make([]models.Card, 0)
cards := make([]Card, 0)
for rows.Next() {
card := models.Card{}
card := Card{}
if err := rows.Scan(
&card.CardID, &card.ProjectID, &card.UserID, &card.Name, &card.MetricType, &card.ViewType, &card.MetricOf,
&card.MetricValue, &card.MetricFormat, &card.IsPublic, &card.CreatedAt, &card.EditedAt,
@@ -208,21 +233,21 @@ func (s *serviceImpl) GetCards(projectId int) (*models.GetCardsResponse, error)
cards = append(cards, card)
}
return &models.GetCardsResponse{Cards: cards}, nil
return &GetCardsResponse{Cards: cards}, nil
}
func (s *serviceImpl) GetCardsPaginated(
func (s *cardsImpl) GetAllPaginated(
projectId int,
filters models.CardListFilter,
sort models.CardListSort,
filters CardListFilter,
sort CardListSort,
limit,
offset int,
) (*models.GetCardsResponsePaginated, error) {
) (*GetCardsResponsePaginated, error) {
// Validate inputs
if err := models.ValidateStruct(filters); err != nil {
if err := ValidateStruct(filters); err != nil {
return nil, fmt.Errorf("invalid filters: %w", err)
}
if err := models.ValidateStruct(sort); err != nil {
if err := ValidateStruct(sort); err != nil {
return nil, fmt.Errorf("invalid sort: %w", err)
}
@@ -289,9 +314,9 @@ func (s *serviceImpl) GetCardsPaginated(
}
defer rows.Close()
var cards []models.Card
var cards []Card
for rows.Next() {
var card models.Card
var card Card
if err := rows.Scan(
&card.CardID, &card.ProjectID, &card.UserID, &card.Name, &card.MetricType, &card.ViewType, &card.MetricOf,
&card.MetricValue, &card.MetricFormat, &card.IsPublic, &card.CreatedAt, &card.EditedAt,
@@ -315,13 +340,13 @@ func (s *serviceImpl) GetCardsPaginated(
return nil, fmt.Errorf("failed to get total count: %w", err)
}
return &models.GetCardsResponsePaginated{
return &GetCardsResponsePaginated{
Cards: cards,
Total: total,
}, nil
}
func (s *serviceImpl) UpdateCard(projectId int, cardID int64, userID uint64, req *models.CardUpdateRequest) (*models.CardGetResponse, error) {
func (s *cardsImpl) Update(projectId int, cardID int64, userID uint64, req *CardUpdateRequest) (*CardGetResponse, error) {
if req.MetricValue == nil {
req.MetricValue = []string{}
}
@@ -353,7 +378,7 @@ func (s *serviceImpl) UpdateCard(projectId int, cardID int64, userID uint64, req
WHERE metric_id = $8 AND project_id = $9 AND deleted_at IS NULL
RETURNING metric_id, project_id, user_id, name, metric_type, view_type, metric_of, metric_value, metric_format, is_public, created_at, edited_at`
card := &models.CardGetResponse{}
card := &CardGetResponse{}
err = tx.QueryRow(ctx, sql,
req.Name, req.MetricType, req.ViewType, req.MetricOf, req.MetricValue, req.MetricFormat, req.IsPublic, cardID, projectId,
).Scan(
@@ -380,7 +405,7 @@ func (s *serviceImpl) UpdateCard(projectId int, cardID int64, userID uint64, req
return card, nil
}
func (s *serviceImpl) DeleteCardSeries(cardId int64) error {
func (s *cardsImpl) DeleteCardSeries(cardId int64) error {
sql := `DELETE FROM public.metric_series WHERE metric_id = $1`
err := s.pgconn.Exec(sql, cardId)
if err != nil {
@@ -389,7 +414,7 @@ func (s *serviceImpl) DeleteCardSeries(cardId int64) error {
return nil
}
func (s *serviceImpl) DeleteCard(projectId int, cardID int64, userID uint64) error {
func (s *cardsImpl) Delete(projectId int, cardID int64, userID uint64) error {
sql := `
UPDATE public.metrics
SET deleted_at = now()
@@ -401,28 +426,3 @@ func (s *serviceImpl) DeleteCard(projectId int, cardID int64, userID uint64) err
}
return nil
}
func (s *serviceImpl) GetCardChartData(projectId int, userID uint64, req *models.GetCardChartDataRequest) ([]models.DataPoint, error) {
jsonInput := `
{
"data": [
{
"timestamp": 1733934939000,
"Series A": 100,
"Series B": 200
},
{
"timestamp": 1733935939000,
"Series A": 150,
"Series B": 250
}
]
}`
var resp models.GetCardChartDataResponse
if err := json.Unmarshal([]byte(jsonInput), &resp); err != nil {
return nil, fmt.Errorf("failed to unmarshal response: %w", err)
}
return resp.Data, nil
}

View file

@@ -1,18 +1,62 @@
package api
package cards
import (
"encoding/json"
"fmt"
"net/http"
"openreplay/backend/pkg/analytics/api/models"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/user"
"strconv"
"time"
"github.com/go-playground/validator/v10"
"github.com/gorilla/mux"
config "openreplay/backend/internal/config/analytics"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/user"
)
func getIDFromRequest(r *http.Request, key string) (int, error) {
vars := mux.Vars(r)
idStr := vars[key]
if idStr == "" {
return 0, fmt.Errorf("missing %s in request", key)
}
id, err := strconv.Atoi(idStr)
if err != nil {
return 0, fmt.Errorf("invalid %s format", key)
}
return id, nil
}
type handlersImpl struct {
log logger.Logger
responser *api.Responser
jsonSizeLimit int64
cards Cards
}
func (e *handlersImpl) GetAll() []*api.Description {
return []*api.Description{
{"/v1/analytics/{projectId}/cards", e.createCard, "POST"},
{"/v1/analytics/{projectId}/cards", e.getCardsPaginated, "GET"},
{"/v1/analytics/{projectId}/cards/{id}", e.getCard, "GET"},
{"/v1/analytics/{projectId}/cards/{id}", e.updateCard, "PUT"},
{"/v1/analytics/{projectId}/cards/{id}", e.deleteCard, "DELETE"},
}
}
func NewHandlers(log logger.Logger, cfg *config.Config, responser *api.Responser, cards Cards) (api.Handlers, error) {
return &handlersImpl{
log: log,
responser: responser,
jsonSizeLimit: cfg.JsonSizeLimit,
cards: cards,
}, nil
}
func (e *handlersImpl) createCard(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
@@ -24,7 +68,7 @@ func (e *handlersImpl) createCard(w http.ResponseWriter, r *http.Request) {
}
bodySize = len(bodyBytes)
req := &models.CardCreateRequest{}
req := &CardCreateRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
@@ -44,7 +88,7 @@ func (e *handlersImpl) createCard(w http.ResponseWriter, r *http.Request) {
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.CreateCard(projectID, currentUser.ID, req)
resp, err := e.cards.Create(projectID, currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -70,7 +114,7 @@ func (e *handlersImpl) getCard(w http.ResponseWriter, r *http.Request) {
return
}
resp, err := e.service.GetCardWithSeries(projectID, id)
resp, err := e.cards.GetWithSeries(projectID, id)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -90,7 +134,7 @@ func (e *handlersImpl) getCards(w http.ResponseWriter, r *http.Request) {
}
//currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.GetCards(projectID)
resp, err := e.cards.GetAll(projectID)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -114,7 +158,7 @@ func (e *handlersImpl) getCardsPaginated(w http.ResponseWriter, r *http.Request)
query := r.URL.Query()
// Filters
filters := models.CardListFilter{
filters := CardListFilter{
Filters: make(map[string]interface{}),
}
@@ -136,7 +180,7 @@ func (e *handlersImpl) getCardsPaginated(w http.ResponseWriter, r *http.Request)
}
// Sorting
sort := models.CardListSort{
sort := CardListSort{
Field: query.Get("sort_field"),
Order: query.Get("sort_order"),
}
@@ -163,17 +207,17 @@ func (e *handlersImpl) getCardsPaginated(w http.ResponseWriter, r *http.Request)
offset := (page - 1) * limit
// Validate inputs
if err := models.ValidateStruct(filters); err != nil {
if err := ValidateStruct(filters); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, fmt.Errorf("invalid filters: %w", err), startTime, r.URL.Path, bodySize)
return
}
if err := models.ValidateStruct(sort); err != nil {
if err := ValidateStruct(sort); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, fmt.Errorf("invalid sort: %w", err), startTime, r.URL.Path, bodySize)
return
}
// Call the service
resp, err := e.service.GetCardsPaginated(projectID, filters, sort, limit, offset)
resp, err := e.cards.GetAllPaginated(projectID, filters, sort, limit, offset)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -206,7 +250,7 @@ func (e *handlersImpl) updateCard(w http.ResponseWriter, r *http.Request) {
}
bodySize = len(bodyBytes)
req := &models.CardUpdateRequest{}
req := &CardUpdateRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
@@ -220,7 +264,7 @@ func (e *handlersImpl) updateCard(w http.ResponseWriter, r *http.Request) {
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.UpdateCard(projectID, int64(cardId), currentUser.ID, req)
resp, err := e.cards.Update(projectID, int64(cardId), currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -246,7 +290,7 @@ func (e *handlersImpl) deleteCard(w http.ResponseWriter, r *http.Request) {
}
currentUser := r.Context().Value("userData").(*user.User)
err = e.service.DeleteCard(projectID, int64(cardId), currentUser.ID)
err = e.cards.Delete(projectID, int64(cardId), currentUser.ID)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -254,43 +298,3 @@ func (e *handlersImpl) deleteCard(w http.ResponseWriter, r *http.Request) {
e.responser.ResponseWithJSON(e.log, r.Context(), w, nil, startTime, r.URL.Path, bodySize)
}
func (e *handlersImpl) getCardChartData(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectID, err := getIDFromRequest(r, "projectId")
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &models.GetCardChartDataRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
validate := validator.New()
err = validate.Struct(req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.GetCardChartData(projectID, currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}

View file

@@ -1,4 +1,4 @@
package models
package cards
import (
"github.com/go-playground/validator/v10"
@@ -85,24 +85,6 @@ type GetCardsResponsePaginated struct {
Total int `json:"total"`
}
type DataPoint struct {
Timestamp int64 `json:"timestamp"`
Series map[string]int64 `json:"series"`
}
type GetCardChartDataRequest struct {
MetricType string `json:"metricType" validate:"required,oneof=timeseries table funnel"`
MetricOf string `json:"metricOf" validate:"required,oneof=session_count user_count"`
ViewType string `json:"viewType" validate:"required,oneof=line_chart table_view"`
MetricFormat string `json:"metricFormat" validate:"required,oneof=default percentage"`
SessionID int64 `json:"sessionId"`
Series []CardSeries `json:"series" validate:"required,dive"`
}
type GetCardChartDataResponse struct {
Data []DataPoint `json:"data"`
}
/************************************************************
* CardListFilter and Sorter
*/

View file

@@ -0,0 +1,50 @@
package charts
import (
"encoding/json"
"fmt"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
type Charts interface {
GetData(projectId int, userId uint64, req *GetCardChartDataRequest) ([]DataPoint, error)
}
type chartsImpl struct {
log logger.Logger
pgconn pool.Pool
}
func New(log logger.Logger, conn pool.Pool) (Charts, error) {
return &chartsImpl{
log: log,
pgconn: conn,
}, nil
}
func (s *chartsImpl) GetData(projectId int, userID uint64, req *GetCardChartDataRequest) ([]DataPoint, error) {
jsonInput := `
{
"data": [
{
"timestamp": 1733934939000,
"Series A": 100,
"Series B": 200
},
{
"timestamp": 1733935939000,
"Series A": 150,
"Series B": 250
}
]
}`
var resp GetCardChartDataResponse
if err := json.Unmarshal([]byte(jsonInput), &resp); err != nil {
return nil, fmt.Errorf("failed to unmarshal response: %w", err)
}
return resp.Data, nil
}
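
Note that the placeholder payload above carries its series values as flat keys ("Series A", "Series B"), while DataPoint (defined in the charts models further down) expects them under a "series" object, so the stock decoder leaves Series nil. A minimal sketch of a custom decoder for the flat layout, assuming that layout is the intended wire format; it would sit next to DataPoint in this package and needs only the encoding/json import already present:

// UnmarshalJSON folds every numeric key except "timestamp" into Series,
// matching the flat layout of the placeholder payload.
func (d *DataPoint) UnmarshalJSON(b []byte) error {
	raw := map[string]json.Number{}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	d.Series = make(map[string]int64, len(raw))
	for key, value := range raw {
		n, err := value.Int64()
		if err != nil {
			return err
		}
		if key == "timestamp" {
			d.Timestamp = n
			continue
		}
		d.Series[key] = n
	}
	return nil
}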

View file

@@ -0,0 +1,95 @@
package charts
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"github.com/go-playground/validator/v10"
"github.com/gorilla/mux"
config "openreplay/backend/internal/config/analytics"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/user"
)
func getIDFromRequest(r *http.Request, key string) (int, error) {
vars := mux.Vars(r)
idStr := vars[key]
if idStr == "" {
return 0, fmt.Errorf("missing %s in request", key)
}
id, err := strconv.Atoi(idStr)
if err != nil {
return 0, fmt.Errorf("invalid %s format", key)
}
return id, nil
}
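
This helper reappears verbatim in the dashboards handlers later in this diff; a shared variant would remove the copy. A sketch, assuming pkg/server/api is an acceptable home (the location and exported name are hypothetical):

package api

import (
	"fmt"
	"net/http"
	"strconv"

	"github.com/gorilla/mux"
)

// IDFromRequest extracts an integer path variable registered with gorilla/mux.
func IDFromRequest(r *http.Request, key string) (int, error) {
	idStr := mux.Vars(r)[key]
	if idStr == "" {
		return 0, fmt.Errorf("missing %s in request", key)
	}
	id, err := strconv.Atoi(idStr)
	if err != nil {
		return 0, fmt.Errorf("invalid %s format: %w", key, err)
	}
	return id, nil
}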
type handlersImpl struct {
log logger.Logger
responser *api.Responser
jsonSizeLimit int64
charts Charts
}
func (e *handlersImpl) GetAll() []*api.Description {
return []*api.Description{
{"/v1/analytics/{projectId}/cards/{id}/chart", e.getCardChartData, "POST"},
{"/v1/analytics/{projectId}/cards/{id}/try", e.getCardChartData, "POST"},
}
}
func NewHandlers(log logger.Logger, cfg *config.Config, responser *api.Responser, charts Charts) (api.Handlers, error) {
return &handlersImpl{
log: log,
responser: responser,
jsonSizeLimit: cfg.JsonSizeLimit,
charts: charts,
}, nil
}
func (e *handlersImpl) getCardChartData(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectID, err := getIDFromRequest(r, "projectId")
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &GetCardChartDataRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
validate := validator.New()
err = validate.Struct(req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.charts.GetData(projectID, currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}

View file

@@ -0,0 +1,21 @@
package charts
import "openreplay/backend/pkg/analytics/cards"
type DataPoint struct {
Timestamp int64 `json:"timestamp"`
Series map[string]int64 `json:"series"`
}
type GetCardChartDataRequest struct {
MetricType string `json:"metricType" validate:"required,oneof=timeseries table funnel"`
MetricOf string `json:"metricOf" validate:"required,oneof=session_count user_count"`
ViewType string `json:"viewType" validate:"required,oneof=line_chart table_view"`
MetricFormat string `json:"metricFormat" validate:"required,oneof=default percentage"`
SessionID int64 `json:"sessionId"`
Series []cards.CardSeries `json:"series" validate:"required,dive"`
}
type GetCardChartDataResponse struct {
Data []DataPoint `json:"data"`
}

View file

@@ -1,21 +1,45 @@
package service
package dashboards
import (
"context"
"encoding/json"
"errors"
"fmt"
"openreplay/backend/pkg/analytics/api/models"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
// CreateDashboard Create a new dashboard
func (s *serviceImpl) CreateDashboard(projectId int, userID uint64, req *models.CreateDashboardRequest) (*models.GetDashboardResponse, error) {
type Dashboards interface {
Create(projectId int, userId uint64, req *CreateDashboardRequest) (*GetDashboardResponse, error)
Get(projectId int, dashboardId int, userId uint64) (*GetDashboardResponse, error)
GetAll(projectId int, userId uint64) (*GetDashboardsResponse, error)
GetAllPaginated(projectId int, userId uint64, req *GetDashboardsRequest) (*GetDashboardsResponsePaginated, error)
Update(projectId int, dashboardId int, userId uint64, req *UpdateDashboardRequest) (*GetDashboardResponse, error)
Delete(projectId int, dashboardId int, userId uint64) error
AddCards(projectId int, dashboardId int, userId uint64, req *AddCardToDashboardRequest) error
DeleteCard(dashboardId int, cardId int) error
}
type dashboardsImpl struct {
log logger.Logger
pgconn pool.Pool
}
func New(log logger.Logger, conn pool.Pool) (Dashboards, error) {
return &dashboardsImpl{
log: log,
pgconn: conn,
}, nil
}
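
Unlike the removed NewService constructor at the end of this diff, New no longer guards against nil dependencies. A sketch restoring those checks, reusing the errors import already present in this file:

func New(log logger.Logger, conn pool.Pool) (Dashboards, error) {
	switch {
	case log == nil:
		return nil, errors.New("logger is empty")
	case conn == nil:
		return nil, errors.New("connection pool is empty")
	}
	return &dashboardsImpl{
		log:    log,
		pgconn: conn,
	}, nil
}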
func (s *dashboardsImpl) Create(projectId int, userID uint64, req *CreateDashboardRequest) (*GetDashboardResponse, error) {
sql := `
INSERT INTO dashboards (project_id, user_id, name, description, is_public, is_pinned)
VALUES ($1, $2, $3, $4, $5, $6)
RETURNING dashboard_id, project_id, user_id, name, description, is_public, is_pinned`
dashboard := &models.GetDashboardResponse{}
dashboard := &GetDashboardResponse{}
err := s.pgconn.QueryRow(sql, projectId, userID, req.Name, req.Description, req.IsPublic, req.IsPinned).Scan(
&dashboard.DashboardID,
&dashboard.ProjectID,
@@ -31,8 +55,7 @@ func (s *serviceImpl) CreateDashboard(projectId int, userID uint64, req *models.
return dashboard, nil
}
// GetDashboard Fetch a specific dashboard by ID
func (s *serviceImpl) GetDashboard(projectId int, dashboardID int, userID uint64) (*models.GetDashboardResponse, error) {
func (s *dashboardsImpl) Get(projectId int, dashboardID int, userID uint64) (*GetDashboardResponse, error) {
sql := `
WITH series_agg AS (
SELECT
@@ -75,7 +98,7 @@ func (s *serviceImpl) GetDashboard(projectId int, dashboardID int, userID uint64
WHERE d.dashboard_id = $1 AND d.project_id = $2 AND d.deleted_at IS NULL
GROUP BY d.dashboard_id, d.project_id, d.name, d.description, d.is_public, d.is_pinned, d.user_id`
dashboard := &models.GetDashboardResponse{}
dashboard := &GetDashboardResponse{}
var ownerID int
var metricsJSON []byte
@@ -108,7 +131,7 @@ func (s *serviceImpl) GetDashboard(projectId int, dashboardID int, userID uint64
return dashboard, nil
}
func (s *serviceImpl) GetDashboards(projectId int, userID uint64) (*models.GetDashboardsResponse, error) {
func (s *dashboardsImpl) GetAll(projectId int, userID uint64) (*GetDashboardsResponse, error) {
sql := `
SELECT d.dashboard_id, d.user_id, d.project_id, d.name, d.description, d.is_public, d.is_pinned, u.email AS owner_email, u.name AS owner_name
FROM dashboards d
@@ -121,9 +144,9 @@ func (s *serviceImpl) GetDashboards(projectId int, userID uint64) (*models.GetDa
}
defer rows.Close()
var dashboards []models.Dashboard
var dashboards []Dashboard
for rows.Next() {
var dashboard models.Dashboard
var dashboard Dashboard
err := rows.Scan(&dashboard.DashboardID, &dashboard.UserID, &dashboard.ProjectID, &dashboard.Name, &dashboard.Description, &dashboard.IsPublic, &dashboard.IsPinned, &dashboard.OwnerEmail, &dashboard.OwnerName)
if err != nil {
@@ -137,13 +160,12 @@ func (s *serviceImpl) GetDashboards(projectId int, userID uint64) (*models.GetDa
return nil, err
}
return &models.GetDashboardsResponse{
return &GetDashboardsResponse{
Dashboards: dashboards,
}, nil
}
// GetDashboardsPaginated Fetch dashboards with pagination
func (s *serviceImpl) GetDashboardsPaginated(projectId int, userID uint64, req *models.GetDashboardsRequest) (*models.GetDashboardsResponsePaginated, error) {
func (s *dashboardsImpl) GetAllPaginated(projectId int, userID uint64, req *GetDashboardsRequest) (*GetDashboardsResponsePaginated, error) {
baseSQL, args := buildBaseQuery(projectId, userID, req)
// Count total dashboards
@@ -165,9 +187,9 @@ func (s *serviceImpl) GetDashboardsPaginated(projectId int, userID uint64, req *
}
defer rows.Close()
var dashboards []models.Dashboard
var dashboards []Dashboard
for rows.Next() {
var dashboard models.Dashboard
var dashboard Dashboard
err := rows.Scan(
&dashboard.DashboardID,
&dashboard.UserID,
@@ -185,21 +207,20 @@ func (s *serviceImpl) GetDashboardsPaginated(projectId int, userID uint64, req *
dashboards = append(dashboards, dashboard)
}
return &models.GetDashboardsResponsePaginated{
return &GetDashboardsResponsePaginated{
Dashboards: dashboards,
Total: total,
}, nil
}
// UpdateDashboard Update a dashboard
func (s *serviceImpl) UpdateDashboard(projectId int, dashboardID int, userID uint64, req *models.UpdateDashboardRequest) (*models.GetDashboardResponse, error) {
func (s *dashboardsImpl) Update(projectId int, dashboardID int, userID uint64, req *UpdateDashboardRequest) (*GetDashboardResponse, error) {
sql := `
UPDATE dashboards
SET name = $1, description = $2, is_public = $3, is_pinned = $4
WHERE dashboard_id = $5 AND project_id = $6 AND user_id = $7 AND deleted_at IS NULL
RETURNING dashboard_id, project_id, user_id, name, description, is_public, is_pinned`
dashboard := &models.GetDashboardResponse{}
dashboard := &GetDashboardResponse{}
err := s.pgconn.QueryRow(sql, req.Name, req.Description, req.IsPublic, req.IsPinned, dashboardID, projectId, userID).Scan(
&dashboard.DashboardID,
&dashboard.ProjectID,
@@ -215,8 +236,7 @@ func (s *serviceImpl) UpdateDashboard(projectId int, dashboardID int, userID uin
return dashboard, nil
}
// DeleteDashboard Soft-delete a dashboard
func (s *serviceImpl) DeleteDashboard(projectId int, dashboardID int, userID uint64) error {
func (s *dashboardsImpl) Delete(projectId int, dashboardID int, userID uint64) error {
sql := `
UPDATE dashboards
SET deleted_at = now()
@@ -230,8 +250,7 @@ func (s *serviceImpl) DeleteDashboard(projectId int, dashboardID int, userID uin
return nil
}
// Helper to build the base query for dashboards
func buildBaseQuery(projectId int, userID uint64, req *models.GetDashboardsRequest) (string, []interface{}) {
func buildBaseQuery(projectId int, userID uint64, req *GetDashboardsRequest) (string, []interface{}) {
var conditions []string
args := []interface{}{projectId}
@@ -282,7 +301,7 @@ func getOrder(order string) string {
return "ASC"
}
func (s *serviceImpl) CardsExist(projectId int, cardIDs []int) (bool, error) {
func (s *dashboardsImpl) CardsExist(projectId int, cardIDs []int) (bool, error) {
sql := `
SELECT COUNT(*) FROM public.metrics
WHERE project_id = $1 AND metric_id = ANY($2)
@@ -295,8 +314,8 @@ func (s *serviceImpl) CardsExist(projectId int, cardIDs []int) (bool, error) {
return count == len(cardIDs), nil
}
func (s *serviceImpl) AddCardsToDashboard(projectId int, dashboardId int, userId uint64, req *models.AddCardToDashboardRequest) error {
_, err := s.GetDashboard(projectId, dashboardId, userId)
func (s *dashboardsImpl) AddCards(projectId int, dashboardId int, userId uint64, req *AddCardToDashboardRequest) error {
_, err := s.Get(projectId, dashboardId, userId)
if err != nil {
return fmt.Errorf("failed to get dashboard: %w", err)
}
@@ -370,7 +389,7 @@ func (s *serviceImpl) AddCardsToDashboard(projectId int, dashboardId int, userId
return nil
}
func (s *serviceImpl) DeleteCardFromDashboard(dashboardId int, cardId int) error {
func (s *dashboardsImpl) DeleteCard(dashboardId int, cardId int) error {
sql := `DELETE FROM public.dashboard_widgets WHERE dashboard_id = $1 AND metric_id = $2`
err := s.pgconn.Exec(sql, dashboardId, cardId)
if err != nil {

View file

@@ -1,15 +1,64 @@
package api
package dashboards
import (
"encoding/json"
"github.com/go-playground/validator/v10"
"fmt"
"net/http"
"openreplay/backend/pkg/analytics/api/models"
"strconv"
"time"
"github.com/go-playground/validator/v10"
"github.com/gorilla/mux"
config "openreplay/backend/internal/config/analytics"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/user"
"time"
)
func getIDFromRequest(r *http.Request, key string) (int, error) {
vars := mux.Vars(r)
idStr := vars[key]
if idStr == "" {
return 0, fmt.Errorf("missing %s in request", key)
}
id, err := strconv.Atoi(idStr)
if err != nil {
return 0, fmt.Errorf("invalid %s format", key)
}
return id, nil
}
type handlersImpl struct {
log logger.Logger
responser *api.Responser
jsonSizeLimit int64
dashboards Dashboards
}
func (e *handlersImpl) GetAll() []*api.Description {
return []*api.Description{
{"/v1/analytics/{projectId}/dashboards", e.createDashboard, "POST"},
{"/v1/analytics/{projectId}/dashboards", e.getDashboards, "GET"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.getDashboard, "GET"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.updateDashboard, "PUT"},
{"/v1/analytics/{projectId}/dashboards/{id}", e.deleteDashboard, "DELETE"},
{"/v1/analytics/{projectId}/dashboards/{id}/cards", e.addCardToDashboard, "POST"},
{"/v1/analytics/{projectId}/dashboards/{id}/cards/{cardId}", e.removeCardFromDashboard, "DELETE"},
}
}
func NewHandlers(log logger.Logger, cfg *config.Config, responser *api.Responser, dashboards Dashboards) (api.Handlers, error) {
return &handlersImpl{
log: log,
responser: responser,
jsonSizeLimit: cfg.JsonSizeLimit,
dashboards: dashboards,
}, nil
}
func (e *handlersImpl) createDashboard(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
@@ -21,7 +70,7 @@ func (e *handlersImpl) createDashboard(w http.ResponseWriter, r *http.Request) {
}
bodySize = len(bodyBytes)
req := &models.CreateDashboardRequest{}
req := &CreateDashboardRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
@@ -41,7 +90,11 @@ func (e *handlersImpl) createDashboard(w http.ResponseWriter, r *http.Request) {
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.CreateDashboard(projectID, currentUser.ID, req)
resp, err := e.dashboards.Create(projectID, currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
@@ -58,7 +107,7 @@ func (e *handlersImpl) getDashboards(w http.ResponseWriter, r *http.Request) {
}
u := r.Context().Value("userData").(*user.User)
resp, err := e.service.GetDashboards(projectID, u.ID)
resp, err := e.dashboards.GetAll(projectID, u.ID)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -84,7 +133,7 @@ func (e *handlersImpl) getDashboard(w http.ResponseWriter, r *http.Request) {
}
u := r.Context().Value("userData").(*user.User)
res, err := e.service.GetDashboard(projectID, dashboardID, u.ID)
res, err := e.dashboards.Get(projectID, dashboardID, u.ID)
if err != nil {
// Map errors to appropriate HTTP status codes
if err.Error() == "not_found: dashboard not found" {
@@ -124,7 +173,7 @@ func (e *handlersImpl) updateDashboard(w http.ResponseWriter, r *http.Request) {
bodySize = len(bodyBytes)
u := r.Context().Value("userData").(*user.User)
_, err = e.service.GetDashboard(projectID, dashboardID, u.ID)
_, err = e.dashboards.Get(projectID, dashboardID, u.ID)
if err != nil {
// Map errors to appropriate HTTP status codes
if err.Error() == "not_found: dashboard not found" {
@@ -137,14 +186,18 @@ func (e *handlersImpl) updateDashboard(w http.ResponseWriter, r *http.Request) {
return
}
req := &models.UpdateDashboardRequest{}
req := &UpdateDashboardRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
currentUser := r.Context().Value("userData").(*user.User)
resp, err := e.service.UpdateDashboard(projectID, dashboardID, currentUser.ID, req)
resp, err := e.dashboards.Update(projectID, dashboardID, currentUser.ID, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
@@ -166,7 +215,7 @@ func (e *handlersImpl) deleteDashboard(w http.ResponseWriter, r *http.Request) {
}
u := r.Context().Value("userData").(*user.User)
_, err = e.service.GetDashboard(projectID, dashboardID, u.ID)
_, err = e.dashboards.Get(projectID, dashboardID, u.ID)
if err != nil {
// Map errors to appropriate HTTP status codes
if err.Error() == "not_found: dashboard not found" {
@@ -179,7 +228,7 @@ func (e *handlersImpl) deleteDashboard(w http.ResponseWriter, r *http.Request) {
return
}
err = e.service.DeleteDashboard(projectID, dashboardID, u.ID)
err = e.dashboards.Delete(projectID, dashboardID, u.ID)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
@@ -224,7 +273,7 @@ func (e *handlersImpl) addCardToDashboard(w http.ResponseWriter, r *http.Request
bodySize = len(bodyBytes)
req := &models.AddCardToDashboardRequest{}
req := &AddCardToDashboardRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
@@ -237,7 +286,7 @@ func (e *handlersImpl) addCardToDashboard(w http.ResponseWriter, r *http.Request
return
}
err = e.service.AddCardsToDashboard(projectID, dashboardID, u.ID, req)
err = e.dashboards.AddCards(projectID, dashboardID, u.ID, req)
if err != nil {
if err.Error() == "not_found: dashboard not found" {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
@@ -276,7 +325,7 @@ func (e *handlersImpl) removeCardFromDashboard(w http.ResponseWriter, r *http.Re
}
u := r.Context().Value("userData").(*user.User)
_, err = e.service.GetDashboard(projectID, dashboardID, u.ID)
_, err = e.dashboards.Get(projectID, dashboardID, u.ID)
if err != nil {
if err.Error() == "not_found: dashboard not found" {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
@@ -287,7 +336,7 @@ func (e *handlersImpl) removeCardFromDashboard(w http.ResponseWriter, r *http.Re
}
}
err = e.service.DeleteCardFromDashboard(dashboardID, cardID)
err = e.dashboards.DeleteCard(dashboardID, cardID)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return

View file

@@ -1,16 +1,18 @@
package models
package dashboards
import "openreplay/backend/pkg/analytics/cards"
type Dashboard struct {
DashboardID int `json:"dashboardId"`
ProjectID int `json:"projectId"`
UserID int `json:"userId"`
Name string `json:"name"`
Description string `json:"description"`
IsPublic bool `json:"isPublic"`
IsPinned bool `json:"isPinned"`
OwnerEmail string `json:"ownerEmail"`
OwnerName string `json:"ownerName"`
Metrics []CardBase `json:"cards"`
DashboardID int `json:"dashboardId"`
ProjectID int `json:"projectId"`
UserID int `json:"userId"`
Name string `json:"name"`
Description string `json:"description"`
IsPublic bool `json:"isPublic"`
IsPinned bool `json:"isPinned"`
OwnerEmail string `json:"ownerEmail"`
OwnerName string `json:"ownerName"`
Metrics []cards.CardBase `json:"cards"`
}
type CreateDashboardResponse struct {

View file

@@ -1,50 +0,0 @@
package service
import (
"context"
"errors"
"openreplay/backend/pkg/analytics/api/models"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
type Service interface {
GetDashboard(projectId int, dashboardId int, userId uint64) (*models.GetDashboardResponse, error)
GetDashboardsPaginated(projectId int, userId uint64, req *models.GetDashboardsRequest) (*models.GetDashboardsResponsePaginated, error)
GetDashboards(projectId int, userId uint64) (*models.GetDashboardsResponse, error)
CreateDashboard(projectId int, userId uint64, req *models.CreateDashboardRequest) (*models.GetDashboardResponse, error)
UpdateDashboard(projectId int, dashboardId int, userId uint64, req *models.UpdateDashboardRequest) (*models.GetDashboardResponse, error)
DeleteDashboard(projectId int, dashboardId int, userId uint64) error
AddCardsToDashboard(projectId int, dashboardId int, userId uint64, req *models.AddCardToDashboardRequest) error
DeleteCardFromDashboard(dashboardId int, cardId int) error
GetCard(projectId int, cardId int) (*models.CardGetResponse, error)
GetCardWithSeries(projectId int, cardId int) (*models.CardGetResponse, error)
GetCards(projectId int) (*models.GetCardsResponse, error)
GetCardsPaginated(projectId int, filters models.CardListFilter, sort models.CardListSort, limit int, offset int) (*models.GetCardsResponsePaginated, error)
CreateCard(projectId int, userId uint64, req *models.CardCreateRequest) (*models.CardGetResponse, error)
UpdateCard(projectId int, cardId int64, userId uint64, req *models.CardUpdateRequest) (*models.CardGetResponse, error)
DeleteCard(projectId int, cardId int64, userId uint64) error
GetCardChartData(projectId int, userId uint64, req *models.GetCardChartDataRequest) ([]models.DataPoint, error)
}
type serviceImpl struct {
log logger.Logger
pgconn pool.Pool
ctx context.Context
}
func NewService(log logger.Logger, conn pool.Pool) (Service, error) {
switch {
case log == nil:
return nil, errors.New("logger is empty")
case conn == nil:
return nil, errors.New("connection pool is empty")
}
return &serviceImpl{
log: log,
pgconn: conn,
ctx: context.Background(),
}, nil
}
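
Taken together, the commit replaces this monolithic Service with the per-domain Cards, Charts, and Dashboards interfaces introduced above. A sketch of how a caller might now compose them; the cards.New constructor and the charts/dashboards import paths are assumptions modeled on the cards import path visible earlier in this diff:

package analytics

import (
	"openreplay/backend/pkg/analytics/cards"
	"openreplay/backend/pkg/analytics/charts"
	"openreplay/backend/pkg/analytics/dashboards"
	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/logger"
)

// Services bundles the per-domain services that replace the old Service.
type Services struct {
	Cards      cards.Cards
	Charts     charts.Charts
	Dashboards dashboards.Dashboards
}

func NewServices(log logger.Logger, conn pool.Pool) (*Services, error) {
	c, err := cards.New(log, conn) // assumed constructor, mirroring charts.New
	if err != nil {
		return nil, err
	}
	ch, err := charts.New(log, conn)
	if err != nil {
		return nil, err
	}
	d, err := dashboards.New(log, conn)
	if err != nil {
		return nil, err
	}
	return &Services{Cards: c, Charts: ch, Dashboards: d}, nil
}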

ee/api/.gitignore vendored
View file

@@ -293,3 +293,4 @@ Pipfile.lock
/chalicelib/core/errors/errors_ch.py
/chalicelib/core/errors/errors_favorite.py
/chalicelib/core/errors/errors_viewed.py
/chalicelib/core/errors/errors_details.py

View file

@@ -6,24 +6,24 @@ name = "pypi"
[packages]
urllib3 = "==2.2.3"
requests = "==2.32.3"
boto3 = "==1.35.76"
boto3 = "==1.35.86"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.9"
elasticsearch = "==8.16.0"
clickhouse-connect = "==0.8.11"
elasticsearch = "==8.17.0"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.6"
uvicorn = {extras = ["standard"], version = "==0.32.1"}
uvicorn = {extras = ["standard"], version = "==0.34.0"}
gunicorn = "==23.0.0"
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.3"}
pydantic = {extras = ["email"], version = "==2.10.4"}
apscheduler = "==3.11.0"
redis = "==5.2.1"
python3-saml = "==1.16.0"
python-multipart = "==0.0.17"
python-multipart = "==0.0.20"
azure-storage-blob = "==12.24.0"
[dev-packages]

View file

@@ -8,6 +8,7 @@ if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental error search")
from . import errors as errors_legacy
from . import errors_ch as errors
from . import errors_details_exp as errors_details
else:
from . import errors

View file

@@ -0,0 +1,261 @@
from decouple import config
import schemas
from . import errors
from chalicelib.core import metrics, metadata
from chalicelib.core import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f"{o[0][0][0]}@{v[0]}",
"count": v[1]
} for o in data for v in o[2]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o[0][0][0],
"count": o[1][0][0],
} for o in data
]
def __transform_map_to_tag(data, key1, key2, requested_key):
result = []
for i in data:
if requested_key == 0 and i.get(key1) is None and i.get(key2) is None:
result.append({"name": "all", "count": int(i.get("count"))})
elif requested_key == 1 and i.get(key1) is not None and i.get(key2) is None:
result.append({"name": i.get(key1), "count": int(i.get("count"))})
elif requested_key == 2 and i.get(key1) is not None and i.get(key2) is not None:
result.append({"name": i.get(key2), "count": int(i.get("count"))})
return result
def __process_tags_map(row):
browsers_partition = row.pop("browsers_partition")
os_partition = row.pop("os_partition")
device_partition = row.pop("device_partition")
country_partition = row.pop("country_partition")
return [
{"name": "browser",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=1)},
{"name": "browser.ver",
"partitions": __transform_map_to_tag(data=browsers_partition,
key1="browser",
key2="browser_version",
requested_key=2)},
{"name": "OS",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=1)
},
{"name": "OS.ver",
"partitions": __transform_map_to_tag(data=os_partition,
key1="os",
key2="os_version",
requested_key=2)},
{"name": "device.family",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=1)},
{"name": "device",
"partitions": __transform_map_to_tag(data=device_partition,
key1="device_type",
key2="device",
requested_key=2)},
{"name": "country", "partitions": __transform_map_to_tag(data=country_partition,
key1="country",
key2="",
requested_key=1)}
]
def get_details(project_id, error_id, user_id, **data):
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(0)
MAIN_ERR_SESS_TABLE = exp_ch_helper.get_main_js_errors_sessions_table(0)
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
ch_sub_query24 = errors.__get_basic_constraints(startTime_arg_name="startDate24", endTime_arg_name="endDate24")
ch_sub_query24.append("error_id = %(error_id)s")
ch_sub_query30 = errors.__get_basic_constraints(startTime_arg_name="startDate30", endTime_arg_name="endDate30",
project_key="errors.project_id")
ch_sub_query30.append("error_id = %(error_id)s")
ch_basic_query = errors.__get_basic_constraints(time_constraint=False)
ch_basic_query.append("error_id = %(error_id)s")
with ch_client.ClickHouseClient() as ch:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = errors.__get_step_size(data["startDate24"], data["endDate24"], density24)
density30 = int(data.get("density30", 30))
step_size30 = errors.__get_step_size(data["startDate30"], data["endDate30"], density30)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_ch_query = f"""\
WITH pre_processed AS (SELECT error_id,
name,
message,
session_id,
datetime,
user_id,
user_browser,
user_browser_version,
user_os,
user_os_version,
user_device_type,
user_device,
user_country,
error_tags_keys,
error_tags_values
FROM {MAIN_ERR_SESS_TABLE} AS errors
WHERE {" AND ".join(ch_basic_query)}
)
SELECT %(error_id)s AS error_id, name, message,users,
first_occurrence,last_occurrence,last_session_id,
sessions,browsers_partition,os_partition,device_partition,
country_partition,chart24,chart30,custom_tags
FROM (SELECT error_id,
name,
message
FROM pre_processed
LIMIT 1) AS details
INNER JOIN (SELECT COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM pre_processed
WHERE datetime >= toDateTime(%(startDate30)s / 1000)
AND datetime <= toDateTime(%(endDate30)s / 1000)
) AS last_month_stats ON TRUE
INNER JOIN (SELECT toUnixTimestamp(max(datetime)) * 1000 AS last_occurrence,
toUnixTimestamp(min(datetime)) * 1000 AS first_occurrence
FROM pre_processed) AS time_details ON TRUE
INNER JOIN (SELECT session_id AS last_session_id,
arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) AS custom_tags
FROM pre_processed
ORDER BY datetime DESC
LIMIT 1) AS last_session_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS browsers_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_browser,''),toNullable('unknown')) AS browser,
coalesce(nullIf(user_browser_version,''),toNullable('unknown')) AS browser_version,
map('browser', browser,
'browser_version', browser_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(browser, browser_version)
ORDER BY browser nulls first, browser_version nulls first, count DESC) AS mapped_browser_details
) AS browser_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS os_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_os,''),toNullable('unknown')) AS os,
coalesce(nullIf(user_os_version,''),toNullable('unknown')) AS os_version,
map('os', os,
'os_version', os_version,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(os, os_version)
ORDER BY os nulls first, os_version nulls first, count DESC) AS mapped_os_details
) AS os_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS device_partition
FROM (SELECT COUNT(1) AS count,
coalesce(nullIf(user_device,''),toNullable('unknown')) AS user_device,
map('device_type', toString(user_device_type),
'device', user_device,
'count', toString(count)) AS details
FROM pre_processed
GROUP BY ROLLUP(user_device_type, user_device)
ORDER BY user_device_type nulls first, user_device nulls first, count DESC
) AS count_per_device_details
) AS mapped_device_details ON TRUE
INNER JOIN (SELECT groupArray(details) AS country_partition
FROM (SELECT COUNT(1) AS count,
map('country', toString(user_country),
'count', toString(count)) AS details
FROM pre_processed
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details
) AS mapped_country_details ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart24
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3756 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query24)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details24 ON TRUE
INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart30
FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3724 second)) *
1000 AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE} AS errors
WHERE {" AND ".join(ch_sub_query30)}
GROUP BY timestamp
ORDER BY timestamp) AS chart_details
) AS chart_details30 ON TRUE;"""
# print("--------------------")
# print(ch.format(main_ch_query, params))
# print("--------------------")
row = ch.execute(query=main_ch_query, parameters=params)
if len(row) == 0:
return {"errors": ["error not found"]}
row = row[0]
row["tags"] = __process_tags_map(row)
query = f"""SELECT session_id, toUnixTimestamp(datetime) * 1000 AS start_ts,
user_anonymous_id,user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, FALSE AS favorite, True AS viewed
FROM {MAIN_SESSIONS_TABLE} AS sessions
WHERE project_id = toUInt16(%(project_id)s)
AND session_id = %(session_id)s
ORDER BY datetime DESC
LIMIT 1;"""
params = {"project_id": project_id, "session_id": row["last_session_id"], "userId": user_id}
# print("--------------------")
# print(ch.format(query, params))
# print("--------------------")
status = ch.execute(query=query, parameters=params)
if status is not None:
status = status[0]
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["last_hydrated_session"] = None
row["favorite"] = False
row["viewed"] = False
row["chart24"] = metrics.__complete_missing_steps(start_time=data["startDate24"], end_time=data["endDate24"],
density=density24, rows=row["chart24"], neutral={"count": 0})
row["chart30"] = metrics.__complete_missing_steps(start_time=data["startDate30"], end_time=data["endDate30"],
density=density30, rows=row["chart30"], neutral={"count": 0})
return {"data": helper.dict_to_camel_case(row)}

View file

@@ -113,3 +113,4 @@ rm -rf /chalicelib/core/errors/errors.py
rm -rf /chalicelib/core/errors/errors_ch.py
rm -rf /chalicelib/core/errors/errors_favorite.py
rm -rf /chalicelib/core/errors/errors_viewed.py
rm -rf /chalicelib/core/errors/errors_details.py

View file

@@ -1,31 +1,30 @@
urllib3==2.2.3
requests==2.32.3
boto3==1.35.76
boto3==1.35.86
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.9
elasticsearch==8.16.0
clickhouse-connect==0.8.11
elasticsearch==8.17.0
jira==3.8.0
cachetools==5.5.0
fastapi==0.115.6
uvicorn[standard]==0.32.1
uvicorn[standard]==0.34.0
gunicorn==23.0.0
python-decouple==3.8
pydantic[email]==2.10.3
pydantic[email]==2.10.4
apscheduler==3.11.0
redis==5.2.1
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
python3-saml==1.16.0
--no-binary=lxml
python-multipart==0.0.18
python3-saml==1.16.0 --no-binary=lxml
python-multipart==0.0.20
#confluent-kafka==2.1.0
azure-storage-blob==12.24.0

View file

@@ -91,7 +91,7 @@ dependencies {
//noinspection GradleDynamicVersion
implementation("com.facebook.react:react-native:0.20.1")
implementation("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version")
implementation("com.github.openreplay:android-tracker:v1.1.3")
implementation("com.github.openreplay:android-tracker:v1.1.4")
}
//allprojects {

View file

@@ -9,14 +9,14 @@ PODS:
- hermes-engine (0.74.0):
- hermes-engine/Pre-built (= 0.74.0)
- hermes-engine/Pre-built (0.74.0)
- Openreplay (1.0.14):
- Openreplay (1.0.15):
- DeviceKit
- SWCompression
- openreplay-react-native (0.6.6):
- openreplay-react-native (0.6.11):
- DoubleConversion
- glog
- hermes-engine
- Openreplay (= 1.0.14)
- Openreplay (= 1.0.15)
- RCT-Folly (= 2024.01.01.00)
- RCTRequired
- RCTTypeSafety
@@ -1423,8 +1423,8 @@ SPEC CHECKSUMS:
fmt: 4c2741a687cc09f0634a2e2c72a838b99f1ff120
glog: c5d68082e772fa1c511173d6b30a9de2c05a69a2
hermes-engine: 6eae7edb2f563ee41d7c1f91f4f2e57c26d8a5c3
Openreplay: e0b8de0203bbbd8fc161afab77ac370e2cef3d12
openreplay-react-native: 3382fc4f597052f10ec79c37e872fc83890c1ce4
Openreplay: ae72a7ca1a05d7da026b7ee8f4f84dcdfaa44021
openreplay-react-native: 0cfa1842c5b2457f6e9d5fa564f746192317b20c
RCT-Folly: 045d6ecaa59d826c5736dfba0b2f4083ff8d79df
RCTDeprecation: 3ca8b6c36bfb302e1895b72cfe7db0de0c92cd47
RCTRequired: 9fc183af555fd0c89a366c34c1ae70b7e03b1dc5

View file

@@ -19,11 +19,11 @@ Pod::Spec.new do |s|
# Use install_modules_dependencies helper to install the dependencies if React Native version >=0.71.0.
# See https://github.com/facebook/react-native/blob/febf6b7f33fdb4904669f99d795eba4c0f95d7bf/scripts/cocoapods/new_architecture.rb#L79.
if respond_to?(:install_modules_dependencies, true)
s.dependency "Openreplay", '1.0.14'
s.dependency "Openreplay", '1.0.15'
install_modules_dependencies(s)
else
s.dependency "React-Core"
s.dependency "Openreplay", '1.0.14'
s.dependency "Openreplay", '1.0.15'
# Don't install the dependencies when we run `pod install` in the old architecture.
if ENV['RCT_NEW_ARCH_ENABLED'] == '1' then
@@ -38,7 +38,7 @@ Pod::Spec.new do |s|
s.dependency "RCTRequired"
s.dependency "RCTTypeSafety"
s.dependency "ReactCommon/turbomodule/core"
s.dependency "Openreplay", '1.0.14'
s.dependency "Openreplay", '1.0.15'
end
end
end

View file

@@ -1,6 +1,6 @@
{
"name": "@openreplay/react-native",
"version": "0.6.10",
"version": "0.6.11",
"description": "Openreplay React-native connector for iOS applications",
"main": "lib/commonjs/index",
"module": "lib/module/index",