Merge remote-tracking branch 'origin/api-bookmarked-pagination' into dev
Commit a20d14ce8c
6 changed files with 68 additions and 44 deletions
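This merge adds bookmarked-session support and real pagination to the error/session search code: SessionsSearchPayloadSchema gains a bookmarked flag (replacing the favorite_only keyword argument of sessions.search2_pg), the PostgreSQL errors query returns a windowed full_count column and pages via parameterized LIMIT/OFFSET derived from page and limit, and the ClickHouse (EE) variant swaps its hard-coded LIMIT 200 for the same parameterized window.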
@@ -103,10 +103,8 @@ def Build(a):
         a["filter"]["startDate"] = -1
         a["filter"]["endDate"] = TimeUTC.now()
-        full_args, query_part, sort = sessions.search_query_parts(
-            data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]),
-            error_status=None, errors_only=False,
-            favorite_only=False, issue=None, project_id=a["projectId"],
-            user_id=None)
+        full_args, query_part, sort = sessions.search_query_parts(
+            data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), error_status=None, errors_only=False,
+            issue=None, project_id=a["projectId"], user_id=None, favorite_only=False)
         subQ = f"""SELECT COUNT(session_id) AS value
                     {query_part}"""
     else:
@@ -423,6 +423,10 @@ def __get_sort_key(key):

 @dev.timed
 def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, status="ALL", favorite_only=False):
+    empty_response = {"data": {
+        'total': 0,
+        'errors': []
+    }}
     status = status.upper()
     if status.lower() not in ['all', 'unresolved', 'resolved', 'ignored']:
         return {"errors": ["invalid error status"]}
@@ -445,12 +449,9 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
     if len(data.events) > 0 or len(data.filters) > 0 or status != "ALL":
         # if favorite_only=True search for sessions associated with favorite_error
         statuses = sessions.search2_pg(data=data, project_id=project_id, user_id=user_id, errors_only=True,
-                                       error_status=status, favorite_only=favorite_only)
+                                       error_status=status)
         if len(statuses) == 0:
-            return {"data": {
-                'total': 0,
-                'errors': []
-            }}
+            return empty_response
         error_ids = [e["error_id"] for e in statuses]
     with pg_client.PostgresClient() as cur:
         if data.startDate is None:
@@ -465,12 +466,20 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
         if data.order is not None:
             order = data.order
+        extra_join = ""
+
         params = {
             "startDate": data.startDate,
             "endDate": data.endDate,
             "project_id": project_id,
             "userId": user_id,
             "step_size": step_size}
+        if data.limit is not None and data.page is not None:
+            params["errors_offset"] = (data.page - 1) * data.limit
+            params["errors_limit"] = data.limit
+        else:
+            params["errors_offset"] = 0
+            params["errors_limit"] = 200

         if error_ids is not None:
             params["error_ids"] = tuple(error_ids)
             pg_sub_query.append("error_id IN %(error_ids)s")
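The hunk above derives the SQL paging window from the request's page/limit pair, falling back to the first 200 rows. A minimal, self-contained sketch of the same arithmetic (the helper function is illustrative, not part of the patch):

def pagination_params(page, limit, default_limit=200):
    # Translate a 1-based page number into OFFSET/LIMIT values, mirroring
    # the params["errors_offset"] / params["errors_limit"] logic above.
    if page is not None and limit is not None:
        return {"errors_offset": (page - 1) * limit, "errors_limit": limit}
    return {"errors_offset": 0, "errors_limit": default_limit}

assert pagination_params(3, 50) == {"errors_offset": 100, "errors_limit": 50}
assert pagination_params(None, None) == {"errors_offset": 0, "errors_limit": 200}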
@@ -478,7 +487,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
             pg_sub_query.append("ufe.user_id = %(userId)s")
             extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
         main_pg_query = f"""\
-            SELECT error_id,
+            SELECT full_count,
+                   error_id,
                    name,
                    message,
                    users,
@@ -486,20 +496,23 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
                    last_occurrence,
                    first_occurrence,
                    chart
-            FROM (SELECT error_id,
-                         name,
-                         message,
-                         COUNT(DISTINCT user_uuid) AS users,
-                         COUNT(DISTINCT session_id) AS sessions,
-                         MAX(timestamp) AS max_datetime,
-                         MIN(timestamp) AS min_datetime
-                  FROM events.errors
-                           INNER JOIN public.errors AS pe USING (error_id)
-                           INNER JOIN public.sessions USING (session_id)
-                      {extra_join}
-                  WHERE {" AND ".join(pg_sub_query)}
-                  GROUP BY error_id, name, message
-                  ORDER BY {sort} {order}) AS details
+            FROM (SELECT COUNT(details) OVER () AS full_count, details.*
+                  FROM (SELECT error_id,
+                               name,
+                               message,
+                               COUNT(DISTINCT user_uuid) AS users,
+                               COUNT(DISTINCT session_id) AS sessions,
+                               MAX(timestamp) AS max_datetime,
+                               MIN(timestamp) AS min_datetime
+                        FROM events.errors
+                                 INNER JOIN public.errors AS pe USING (error_id)
+                                 INNER JOIN public.sessions USING (session_id)
+                            {extra_join}
+                        WHERE {" AND ".join(pg_sub_query)}
+                        GROUP BY error_id, name, message
+                        ORDER BY {sort} {order}) AS details
+                  LIMIT %(errors_limit)s OFFSET %(errors_offset)s
+                  ) AS details
             INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
                                        MIN(timestamp) AS first_occurrence
                                 FROM events.errors
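The rewritten query wraps the aggregated details in COUNT(details) OVER () AS full_count, a window function that attaches the total number of matching rows to every row of the requested page; since window functions are evaluated before LIMIT/OFFSET, one round trip yields both the page and the grand total. A stripped-down sketch of the pattern (the query text below is illustrative; only the shape matches the patch):

paged_errors_query = """
    SELECT COUNT(details) OVER () AS full_count, details.*
    FROM (SELECT error_id, name, message
          FROM events.errors
          GROUP BY error_id, name, message) AS details
    LIMIT %(errors_limit)s OFFSET %(errors_offset)s;"""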
@@ -517,16 +530,14 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s

         # print("--------------------")
         # print(cur.mogrify(main_pg_query, params))
         # print("--------------------")

         cur.execute(cur.mogrify(main_pg_query, params))
-        total = cur.rowcount
+        rows = cur.fetchall()
+        total = 0 if len(rows) == 0 else rows[0]["full_count"]
         if flows:
             return {"data": {"count": total}}
-        row = cur.fetchone()
-        rows = []
-        limit = 200
-        while row is not None and len(rows) < limit:
-            rows.append(row)
-            row = cur.fetchone()

         if total == 0:
             rows = []
         else:
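With the total carried on every row, the cursor handling simplifies: fetchall() replaces the manual fetchone() loop capped at 200, the total is read from the first row's full_count (zero when nothing matches), and the following hunk strips the synthetic column with r.pop("full_count") before rows are returned. A self-contained sketch with stand-in data:

# `rows` stands in for cur.fetchall() on the windowed query above.
rows = [{"full_count": 2, "error_id": "e1"}, {"full_count": 2, "error_id": "e2"}]
total = 0 if len(rows) == 0 else rows[0]["full_count"]
for r in rows:
    r.pop("full_count")  # strip the synthetic window column before returning
assert total == 2 and rows == [{"error_id": "e1"}, {"error_id": "e2"}]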
@@ -552,6 +563,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
             }

         for r in rows:
+            r.pop("full_count")
             if r["error_id"] in statuses:
                 r["status"] = statuses[r["error_id"]]["status"]
                 r["parent_error_id"] = statuses[r["error_id"]]["parent_error_id"]
@@ -168,10 +168,11 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):


 @dev.timed
-def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, favorite_only=False, errors_only=False,
+def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
                error_status="ALL", count_only=False, issue=None):
-    full_args, query_part, sort = search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id,
-                                                     user_id)
+    full_args, query_part, sort = search_query_parts(data=data, error_status=error_status, errors_only=errors_only,
+                                                     favorite_only=data.bookmarked, issue=issue, project_id=project_id,
+                                                     user_id=user_id)
     if data.limit is not None and data.page is not None:
         full_args["sessions_limit_s"] = (data.page - 1) * data.limit
         full_args["sessions_limit_e"] = data.page * data.limit
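Note that the sessions path pages differently from the errors path: instead of OFFSET/LIMIT, sessions_limit_s and sessions_limit_e delimit the row range (page-1)*limit through page*limit, presumably consumed as row-number bounds by the main sessions query. A tiny sketch of the arithmetic (the helper name is illustrative):

def session_page_bounds(page: int, limit: int):
    # Mirrors full_args["sessions_limit_s"] / ["sessions_limit_e"] above.
    return (page - 1) * limit, page * limit

assert session_page_bounds(1, 200) == (0, 200)
assert session_page_bounds(2, 50) == (50, 100)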
@@ -379,8 +380,7 @@ def search_query_parts(data, error_status, errors_only, favorite_only, issue, pr
     fav_only_join = ""
     if favorite_only and not errors_only:
         fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id"
-        extra_constraints.append("fs.user_id = %(userId)s")
-        full_args["userId"] = user_id
+        # extra_constraints.append("fs.user_id = %(userId)s")
     events_query_part = ""
     if len(data.filters) > 0:
         meta_keys = None
@@ -976,7 +976,12 @@ def search_query_parts(data, error_status, errors_only, favorite_only, issue, pr
             extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
             extra_constraints.append("ufe.user_id = %(userId)s")
     # extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints]
-    if not favorite_only and not errors_only and user_id is not None:
+    if favorite_only and not errors_only and user_id is not None:
+        extra_from += """INNER JOIN (SELECT user_id, session_id
+                                     FROM public.user_favorite_sessions
+                                     WHERE user_id = %(userId)s) AS favorite_sessions
+                                     USING (session_id)"""
+    elif not favorite_only and not errors_only and user_id is not None:
         extra_from += """LEFT JOIN (SELECT user_id, session_id
                                     FROM public.user_favorite_sessions
                                     WHERE user_id = %(userId)s) AS favorite_sessions
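This branch is the crux of the bookmarked filter: when favorite_only is set, the user_favorite_sessions subquery joins with INNER JOIN, so sessions the user has not favorited are dropped; otherwise the LEFT JOIN merely annotates each session. A sketch of the two shapes (the helper function is illustrative, not from the patch):

def favorites_join(favorite_only: bool) -> str:
    # INNER JOIN filters down to favorited sessions; LEFT JOIN only annotates.
    kind = "INNER" if favorite_only else "LEFT"
    return f"""{kind} JOIN (SELECT user_id, session_id
                FROM public.user_favorite_sessions
                WHERE user_id = %(userId)s) AS favorite_sessions
                USING (session_id)"""

assert favorites_join(True).startswith("INNER JOIN")
assert favorites_join(False).startswith("LEFT JOIN")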
@@ -133,7 +133,7 @@ def events_search(projectId: int, q: str,
 @app.post('/{projectId}/sessions/search2', tags=["sessions"])
 def sessions_search2(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
-    data = sessions.search2_pg(data, projectId, user_id=context.user_id)
+    data = sessions.search2_pg(data=data, project_id=projectId, user_id=context.user_id)
     return {'data': data}
@@ -605,6 +605,7 @@ class SessionsSearchPayloadSchema(BaseModel):
     group_by_user: bool = Field(default=False)
     limit: int = Field(default=200, gt=0, le=200)
     page: int = Field(default=1, gt=0)
+    bookmarked: bool = Field(default=False)

     class Config:
         alias_generator = attribute_to_camel_case
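The new bookmarked field defaults to False and, via the existing attribute_to_camel_case alias generator, is populated from camelCase request bodies. A self-contained toy model illustrating the behaviour (pydantic v1 style, matching the parse_obj calls in the diff; the model and alias helper here only mimic the real schema):

from pydantic import BaseModel, Field

def attribute_to_camel_case(name: str) -> str:
    head, *rest = name.split("_")
    return head + "".join(part.capitalize() for part in rest)

class ToySearchPayload(BaseModel):
    group_by_user: bool = Field(default=False)
    limit: int = Field(default=200, gt=0, le=200)
    page: int = Field(default=1, gt=0)
    bookmarked: bool = Field(default=False)

    class Config:
        alias_generator = attribute_to_camel_case
        allow_population_by_field_name = True

data = ToySearchPayload.parse_obj({"groupByUser": True, "bookmarked": True})
assert data.group_by_user and data.bookmarked and data.page == 1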
@@ -483,13 +483,18 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
     order = "DESC"
     if data.order is not None:
         order = data.order
     extra_join = ""
     params = {
         "startDate": data.startDate,
         "endDate": data.endDate,
         "project_id": project_id,
         "userId": user_id,
         "step_size": step_size}
+    if data.limit is not None and data.page is not None:
+        params["errors_offset"] = (data.page - 1) * data.limit
+        params["errors_limit"] = data.limit
+    else:
+        params["errors_offset"] = 0
+        params["errors_limit"] = 200
     if favorite_only:
         cur.execute(cur.mogrify(f"""SELECT error_id
                                     FROM public.user_favorite_errors
@@ -531,9 +536,10 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
                  WHERE {" AND ".join(ch_sub_query)}
                  GROUP BY error_id, name, message
                  ORDER BY {sort} {order}
-                 LIMIT 200) AS details INNER JOIN (SELECT error_id AS error_id, toUnixTimestamp(MAX(datetime))*1000 AS last_occurrence, toUnixTimestamp(MIN(datetime))*1000 AS first_occurrence
-                                                   FROM errors
-                                                   GROUP BY error_id) AS time_details
+                 LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
+        INNER JOIN (SELECT error_id AS error_id, toUnixTimestamp(MAX(datetime))*1000 AS last_occurrence, toUnixTimestamp(MIN(datetime))*1000 AS first_occurrence
+                    FROM errors
+                    GROUP BY error_id) AS time_details
                  ON details.error_id=time_details.error_id
         INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
                     FROM (SELECT error_id, toUnixTimestamp(toStartOfInterval(datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
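On the ClickHouse side the page window moves inside the details subquery, replacing the fixed LIMIT 200, so the subsequent time_details and chart_details joins aggregate only over the selected page of error_ids. Schematically (names per the diff, query bodies elided):

main_ch_query_shape = """
    SELECT ...
    FROM (SELECT ... ORDER BY {sort} {order}
          LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
    INNER JOIN time_details ON details.error_id = time_details.error_id
    INNER JOIN chart_details ON details.error_id = chart_details.error_id"""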
@@ -544,8 +550,10 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False, s
                        ORDER BY timestamp) AS sub_table
              GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""

-    # print("--------------------")
-    # print(main_ch_query % params)
+    # print("------------")
+    # print(ch.client().substitute_params(main_ch_query, params))
+    # print("------------")
+
     rows = ch.execute(query=main_ch_query, params=params)
     if len(statuses) == 0:
         query = cur.mogrify(