* fix(chalice): fixed share to slack

* fix(chalice): fixed empty right table limitation
Author: Kraiem Taha Yassine, 2025-02-13 14:09:15 +01:00 (committed by GitHub)
parent d031210365
commit 7b61d06454
4 changed files with 47 additions and 30 deletions

File 1 of 4

@@ -149,7 +149,11 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
     params["error_query"] = helper.values_for_operator(value=data.query,
                                                        op=schemas.SearchEventOperator.CONTAINS)
-    main_pg_query = f"""SELECT full_count,
+    main_pg_query = f"""WITH raw_data AS (SELECT DISTINCT session_id
+                                          FROM events.errors
+                                          {"INNER JOIN public.sessions USING(session_id)" if platform else ""}
+                                          WHERE {" AND ".join(pg_sub_query_chart)})
+                        SELECT full_count,
                                error_id,
                                name,
                                message,
@@ -183,11 +187,11 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
                           FROM (SELECT generated_timestamp AS timestamp,
                                        COUNT(session_id) AS count
                                 FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
-                                LEFT JOIN LATERAL (SELECT DISTINCT session_id
-                                                   FROM events.errors
-                                                   {"INNER JOIN public.sessions USING(session_id)" if platform else ""}
-                                                   WHERE {" AND ".join(pg_sub_query_chart)}
-                                ) AS sessions ON (TRUE)
+                                LEFT JOIN LATERAL (SELECT *
+                                                   FROM raw_data
+                                                   UNION ALL
+                                                   SELECT NULL AS session_id
+                                                   WHERE NOT EXISTS(SELECT 1 FROM raw_data)) AS sessions ON (TRUE)
                                 GROUP BY timestamp
                                 ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
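
The `raw_data` CTE plus the `UNION ALL ... WHERE NOT EXISTS` branch is the recurring pattern in this commit: it guarantees the right side of the join is never an empty relation, emitting a single NULL row when `raw_data` has no matches. A minimal sketch of just that guard, with a hypothetical `VALUES` list standing in for `events.errors`:

    -- Hypothetical demo of the fallback-row guard (not the production query).
    WITH raw_data AS (SELECT session_id
                      FROM (VALUES (101), (102)) AS t(session_id)
                      WHERE FALSE)  -- simulate "no matching errors"
    SELECT *
    FROM raw_data
    UNION ALL
    SELECT NULL AS session_id
    WHERE NOT EXISTS(SELECT 1 FROM raw_data);
    -- returns exactly one NULL row here; flip WHERE FALSE to WHERE TRUE
    -- and it returns 101 and 102 with no NULL padding

COUNT(session_id) over that padding row still yields 0, so the guard never inflates the chart counts.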

File 2 of 4

@@ -36,24 +36,24 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
                                          SELECT generated_timestamp AS timestamp,
                                                 COUNT(s) AS count
                                          FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
-                                         LEFT JOIN LATERAL ( SELECT 1 AS s
-                                                             FROM full_sessions
-                                                             WHERE start_ts >= generated_timestamp
-                                                               AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
+                                         LEFT JOIN LATERAL (SELECT 1 AS s
+                                                            FROM full_sessions
+                                                            WHERE start_ts >= generated_timestamp
+                                                              AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
                                          GROUP BY generated_timestamp
                                          ORDER BY generated_timestamp;""", full_args)
         elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
             main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.user_id, s.start_ts
-                                         {query_part}
-                                         AND s.user_id IS NOT NULL
-                                         AND s.user_id != '')
+                                                                {query_part}
+                                                                AND s.user_id IS NOT NULL
+                                                                AND s.user_id != '')
                                          SELECT generated_timestamp AS timestamp,
                                                 COUNT(s) AS count
                                          FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
-                                         LEFT JOIN LATERAL ( SELECT DISTINCT user_id AS s
-                                                             FROM full_sessions
-                                                             WHERE start_ts >= generated_timestamp
-                                                               AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
+                                         LEFT JOIN LATERAL (SELECT DISTINCT user_id AS s
+                                                            FROM full_sessions
+                                                            WHERE start_ts >= generated_timestamp
+                                                              AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
                                          GROUP BY generated_timestamp
                                          ORDER BY generated_timestamp;""", full_args)
         else:
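
This hunk only re-aligns the indentation inside the f-strings; the queries themselves are unchanged. For reference, the bucketing pattern they implement is one output row per `generate_series` step, with the lateral subquery counting sessions whose `start_ts` falls inside the step. A self-contained sketch with hypothetical literal timestamps in place of the `%(startDate)s`/`%(endDate)s`/`%(step_size)s` parameters:

    -- Hypothetical demo of the per-bucket counting pattern.
    WITH full_sessions AS (SELECT start_ts
                           FROM (VALUES (1000), (1500), (3200)) AS t(start_ts))
    SELECT generated_timestamp AS timestamp,
           COUNT(s) AS count
    FROM generate_series(1000, 3000, 1000) AS generated_timestamp
    LEFT JOIN LATERAL (SELECT 1 AS s
                       FROM full_sessions
                       WHERE start_ts >= generated_timestamp
                         AND start_ts <= generated_timestamp + 1000) AS sessions ON (TRUE)
    GROUP BY generated_timestamp
    ORDER BY generated_timestamp;
    -- yields (1000, 2), (2000, 0), (3000, 1): the empty 2000 bucket survives
    -- because LEFT JOIN LATERAL pads it with one NULL row and COUNT(s) skips NULL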

File 3 of 4

@@ -34,27 +34,40 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
     with ch_client.ClickHouseClient() as cur:
         if metric_type == schemas.MetricType.TIMESERIES:
             if metric_of == schemas.MetricOfTimeseries.SESSION_COUNT:
-                query = f"""SELECT gs.generate_series AS timestamp,
+                query = f"""WITH raw_data AS (SELECT s.session_id AS session_id,
+                                                     s.datetime AS datetime
+                                              {query_part})
+                            SELECT gs.generate_series AS timestamp,
                                    COALESCE(COUNT(DISTINCT processed_sessions.session_id),0) AS count
                             FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
-                            LEFT JOIN (SELECT s.session_id AS session_id,
-                                              s.datetime AS datetime
-                                       {query_part}) AS processed_sessions ON(TRUE)
+                            LEFT JOIN (SELECT *
+                                       FROM raw_data
+                                       UNION ALL
+                                       SELECT NULL AS session_id,
+                                              NULL AS datetime
+                                       WHERE NOT EXISTS(SELECT 1 FROM raw_data)) AS processed_sessions ON(TRUE)
                             WHERE processed_sessions.datetime >= toDateTime(timestamp / 1000)
                               AND processed_sessions.datetime < toDateTime((timestamp + %(step_size)s) / 1000)
                             GROUP BY timestamp
                             ORDER BY timestamp;"""
             elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
-                query = f"""SELECT gs.generate_series AS timestamp,
+                query = f"""WITH raw_data AS (SELECT s.user_id AS user_id,
+                                                     s.datetime AS datetime
+                                              {query_part}
+                                              WHERE isNotNull(s.user_id)
+                                                AND s.user_id != '')
+                            SELECT gs.generate_series AS timestamp,
                                    COALESCE(COUNT(DISTINCT processed_sessions.user_id),0) AS count
                             FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
-                            LEFT JOIN (SELECT s.user_id AS user_id,
-                                              s.datetime AS datetime
-                                       {query_part}
-                                       WHERE isNotNull(s.user_id)
-                                         AND s.user_id != '') AS processed_sessions ON(TRUE)
-                            WHERE processed_sessions.datetime >= toDateTime(timestamp / 1000)
-                              AND processed_sessions.datetime < toDateTime((timestamp + %(step_size)s) / 1000)
+                            LEFT JOIN (SELECT *
+                                       FROM raw_data
+                                       UNION ALL
+                                       SELECT NULL AS user_id,
+                                              NULL AS datetime
+                                       WHERE NOT EXISTS(SELECT 1 FROM raw_data)) AS processed_sessions ON(TRUE)
+                            WHERE processed_sessions.user_id IS NULL
+                               OR processed_sessions.datetime >= toDateTime(timestamp / 1000)
+                              AND processed_sessions.datetime < toDateTime((timestamp + %(step_size)s) / 1000)
                             GROUP BY timestamp
                             ORDER BY timestamp;"""
             else:
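
The ClickHouse variant needs more than the fallback row because the outer WHERE clause filters on `processed_sessions.datetime`: a predicate over right-side columns rejects the NULL-extended rows of a LEFT JOIN, so an empty right side previously erased every bucket. The new `processed_sessions.user_id IS NULL OR ...` escape hatch lets the padding row through (AND binds tighter than OR, so the datetime range test still applies to real rows); note the diff adds that escape hatch only in the USER_COUNT branch. A hypothetical Postgres rendering of the same mechanics, since it is easy to run standalone:

    -- Hypothetical demo: why a WHERE clause after a LEFT JOIN needs the
    -- IS NULL escape hatch once the fallback row is in play.
    WITH raw_data AS (SELECT user_id, dt
                      FROM (VALUES (7, 1100)) AS t(user_id, dt)
                      WHERE FALSE)  -- simulate "no matching sessions"
    SELECT gs AS timestamp,
           COUNT(DISTINCT p.user_id) AS count
    FROM generate_series(1000, 3000, 1000) AS gs
    LEFT JOIN (SELECT *
               FROM raw_data
               UNION ALL
               SELECT NULL, NULL
               WHERE NOT EXISTS(SELECT 1 FROM raw_data)) AS p(user_id, dt) ON (TRUE)
    WHERE p.user_id IS NULL
       OR (p.dt >= gs AND p.dt < gs + 1000)
    GROUP BY timestamp
    ORDER BY timestamp;
    -- returns (1000, 0), (2000, 0), (3000, 0); drop the IS NULL branch and
    -- the NULL comparisons filter out all three buckets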

File 4 of 4

@@ -79,7 +79,7 @@ def notify(projectId: int, integration: str, webhookId: int, source: str, source
     args = {"tenant_id": context.tenant_id,
             "user": context.email, "comment": comment, "project_id": projectId,
-            "id": webhookId,
+            "integration_id": webhookId,
             "project_name": context.project.name}
     if integration == schemas.WebhookType.SLACK:
         if source == "sessions":