Merge remote-tracking branch 'origin/api-v1.8.2' into dev

commit 7b383f7bba
Author: Taha Yassine Kraiem
Date:   2022-11-25 17:27:00 +01:00
8 changed files with 45 additions and 29 deletions

View file

@@ -76,19 +76,21 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
         rows = cur.fetchall()
         # if recorded is requested, check if it was saved or computed
         if recorded:
-            for r in rows:
+            u_values = []
+            params = {}
+            for i, r in enumerate(rows):
                 if r["first_recorded_session_at"] is None:
-                    extra_update = ""
-                    if r["recorded"]:
-                        extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
-                    query = cur.mogrify(f"""UPDATE public.projects
-                                            SET sessions_last_check_at=(now() at time zone 'utc')
-                                            {extra_update}
-                                            WHERE project_id=%(project_id)s""",
-                                        {"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
-                    cur.execute(query)
+                    u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
+                    params[f"project_id_{i}"] = r["project_id"]
+                    params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
                 r.pop("first_recorded_session_at")
                 r.pop("first_recorded")
+            if len(u_values) > 0:
+                query = cur.mogrify(f"""UPDATE public.projects
+                                        SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
+                                        FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
+                                        WHERE projects.project_id=u.project_id;""", params)
+                cur.execute(query)
         if recording_state and len(rows) > 0:
             project_ids = [f'({r["project_id"]})' for r in rows]

View file

@@ -191,7 +191,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
                                         GROUP BY main.session_id)
                                     AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
                                 """)
-    n_stages=len(n_stages_query)
+    n_stages = len(n_stages_query)
     if n_stages == 0:
         return []
     n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
@@ -215,7 +215,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
                                       AND ISE.session_id = stages_t.session_id
                                       AND ISS.type!='custom' -- ignore custom issues because they are massive
                                       {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
-                                    LIMIT 50 -- remove the limit to get exact stats
+                                    LIMIT 10 -- remove the limit to get exact stats
                                 ) AS issues_t ON (TRUE)
              ) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
     """
@@ -348,7 +348,7 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues,
             if error_id not in errors:
                 errors[error_id] = []
             ic = 0
-            row_issue_id=row['issue_id']
+            row_issue_id = row['issue_id']
             if row_issue_id is not None:
                 if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
                     if error_id == row_issue_id:
@@ -533,6 +533,9 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
             if is_sign:
                 n_critical_issues += n_issues_dict[issue_id]

+    # To limit the number of returned issues to the frontend
+    issues_dict["significant"] = issues_dict["significant"][:50]
+    issues_dict["insignificant"] = issues_dict["insignificant"][:50]
     return n_critical_issues, issues_dict, total_drop_due_to_issues
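
The new slicing complements the SQL LIMIT on the Python side: whatever the aggregation produced, at most 50 issues per bucket are serialized back to the frontend. The guard on its own, with a hypothetical named constant in place of the inlined literal 50:

    MAX_RETURNED_ISSUES = 50  # illustrative name; the source inlines the literal

    def cap_issue_buckets(issues_dict: dict) -> dict:
        # Bound the response payload without touching the counts computed above.
        issues_dict["significant"] = issues_dict["significant"][:MAX_RETURNED_ISSUES]
        issues_dict["insignificant"] = issues_dict["insignificant"][:MAX_RETURNED_ISSUES]
        return issues_dict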

View file

@@ -87,22 +87,23 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
                             {"tenant_id": tenant_id, "user_id": user_id, "now": TimeUTC.now()})
         cur.execute(query)
         rows = cur.fetchall()
         # if recorded is requested, check if it was saved or computed
         if recorded:
-            for r in rows:
+            u_values = []
+            params = {}
+            for i, r in enumerate(rows):
                 if r["first_recorded_session_at"] is None:
-                    extra_update = ""
-                    if r["recorded"]:
-                        extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
-                    query = cur.mogrify(f"""UPDATE public.projects
-                                            SET sessions_last_check_at=(now() at time zone 'utc')
-                                            {extra_update}
-                                            WHERE project_id=%(project_id)s""",
-                                        {"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
-                    cur.execute(query)
+                    u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
+                    params[f"project_id_{i}"] = r["project_id"]
+                    params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
                 r.pop("first_recorded_session_at")
                 r.pop("first_recorded")
+            if len(u_values) > 0:
+                query = cur.mogrify(f"""UPDATE public.projects
+                                        SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
+                                        FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
+                                        WHERE projects.project_id=u.project_id;""", params)
+                cur.execute(query)
         if recording_state and len(rows) > 0:
             project_ids = [f'({r["project_id"]})' for r in rows]
@@ -112,6 +113,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
                                             WHERE sessions.start_ts >= %(startDate)s AND sessions.start_ts <= %(endDate)s
                                             GROUP BY project_id;""",
                                 {"startDate": TimeUTC.now(delta_days=-3), "endDate": TimeUTC.now(delta_days=1)})
+            cur.execute(query=query)
             status = cur.fetchall()
             for r in rows:
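
The one-line addition here is a bugfix: cur.mogrify only binds parameters into a SQL string client-side and sends nothing to the server, so the recording-state query was being built but never run before fetchall(). The pattern in isolation, assuming a psycopg2 connection with a hypothetical DSN:

    import psycopg2

    with psycopg2.connect("dbname=app") as conn, conn.cursor() as cur:
        query = cur.mogrify("SELECT %(x)s AS x;", {"x": 1})  # returns bytes, does not execute
        cur.execute(query)  # without this, fetchall() raises or returns stale results
        print(cur.fetchall())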

View file

@@ -198,7 +198,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
                                         GROUP BY main.session_id)
                                     AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
                                 """)
-    n_stages=len(n_stages_query)
+    n_stages = len(n_stages_query)
     if n_stages == 0:
         return []
     n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
@@ -222,7 +222,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
                                       AND ISE.session_id = stages_t.session_id
                                       AND ISS.type!='custom' -- ignore custom issues because they are massive
                                       {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
-                                    LIMIT 50 -- remove the limit to get exact stats
+                                    LIMIT 10 -- remove the limit to get exact stats
                                 ) AS issues_t ON (TRUE)
              ) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
     """
@@ -355,7 +355,7 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues,
             if error_id not in errors:
                 errors[error_id] = []
             ic = 0
-            row_issue_id=row['issue_id']
+            row_issue_id = row['issue_id']
             if row_issue_id is not None:
                 if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
                     if error_id == row_issue_id:
@@ -540,6 +540,9 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
             if is_sign:
                 n_critical_issues += n_issues_dict[issue_id]

+    # To limit the number of returned issues to the frontend
+    issues_dict["significant"] = issues_dict["significant"][:50]
+    issues_dict["insignificant"] = issues_dict["insignificant"][:50]
     return n_critical_issues, issues_dict, total_drop_due_to_issues

View file

@@ -77,4 +77,6 @@
 DROP INDEX IF EXISTS events_common.requests_url_gin_idx2;
 DROP INDEX IF EXISTS events.resources_url_gin_idx;
 DROP INDEX IF EXISTS events.resources_url_idx;
-COMMIT;
+COMMIT;
+
+CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
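
Note where the new index lands: after COMMIT;, because CREATE INDEX CONCURRENTLY cannot run inside a transaction block. Driving the same statement from application code therefore requires autocommit; a sketch with psycopg2 and a hypothetical DSN:

    import psycopg2

    conn = psycopg2.connect("dbname=app")  # hypothetical DSN
    conn.autocommit = True  # CONCURRENTLY is rejected inside a transaction block
    with conn.cursor() as cur:
        cur.execute("""CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx
                       ON events_common.requests (session_id, status_code)
                       WHERE status_code IS NOT NULL;""")
    conn.close()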

View file

@@ -1228,6 +1228,7 @@ $$
 CREATE INDEX IF NOT EXISTS requests_request_body_nn_gin_idx ON events_common.requests USING GIN (request_body gin_trgm_ops) WHERE request_body IS NOT NULL;
 CREATE INDEX IF NOT EXISTS requests_response_body_nn_gin_idx ON events_common.requests USING GIN (response_body gin_trgm_ops) WHERE response_body IS NOT NULL;
 CREATE INDEX IF NOT EXISTS requests_status_code_nn_idx ON events_common.requests (status_code) WHERE status_code IS NOT NULL;
+CREATE INDEX IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
 CREATE INDEX IF NOT EXISTS requests_host_nn_idx ON events_common.requests (host) WHERE host IS NOT NULL;
 CREATE INDEX IF NOT EXISTS requests_host_nn_gin_idx ON events_common.requests USING GIN (host gin_trgm_ops) WHERE host IS NOT NULL;
 CREATE INDEX IF NOT EXISTS requests_path_nn_idx ON events_common.requests (path) WHERE path IS NOT NULL;
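
This is the same index added to the base schema, where plain CREATE INDEX suffices, presumably because it runs at schema-creation time. Keyed on (session_id, status_code) and partial on status_code IS NOT NULL, it can serve per-session status lookups such as the hypothetical query below (not from this commit):

    import psycopg2

    # Count failed requests for one session; session_id equality plus a
    # status_code comparison lets the planner use the new partial index.
    with psycopg2.connect("dbname=app") as conn, conn.cursor() as cur:
        cur.execute("""SELECT COUNT(*)
                       FROM events_common.requests
                       WHERE session_id = %(session_id)s
                         AND status_code >= 400;""",
                    {"session_id": 42})  # illustrative session_id
        failed_requests = cur.fetchone()[0]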

View file

@@ -67,4 +67,6 @@
 DROP INDEX IF EXISTS events_common.requests_url_gin_idx2;
 DROP INDEX IF EXISTS events.resources_url_gin_idx;
 DROP INDEX IF EXISTS events.resources_url_idx;
-COMMIT;
+COMMIT;
+
+CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;

View file

@@ -603,6 +603,7 @@ $$
 CREATE INDEX requests_request_body_nn_gin_idx ON events_common.requests USING GIN (request_body gin_trgm_ops) WHERE request_body IS NOT NULL;
 CREATE INDEX requests_response_body_nn_gin_idx ON events_common.requests USING GIN (response_body gin_trgm_ops) WHERE response_body IS NOT NULL;
 CREATE INDEX requests_status_code_nn_idx ON events_common.requests (status_code) WHERE status_code IS NOT NULL;
+CREATE INDEX requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
 CREATE INDEX requests_host_nn_idx ON events_common.requests (host) WHERE host IS NOT NULL;
 CREATE INDEX requests_host_nn_gin_idx ON events_common.requests USING GIN (host gin_trgm_ops) WHERE host IS NOT NULL;
 CREATE INDEX requests_path_nn_idx ON events_common.requests (path) WHERE path IS NOT NULL;