Api v1.15.0 (#1633)

* fix(chalice): fixed funnels
Author: Kraiem Taha Yassine
Date: 2023-11-09 16:08:10 +01:00 (committed by GitHub)
Parent: d573d5ad97
Commit: f56ea86bd4
Signature: no known key found in database for GPG key ID 4AEE18F83AFDEB23
4 changed files with 18 additions and 151 deletions
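The funnel fix below amounts to two changes in the insights code: stage fields are now read as attributes instead of dict keys (presumably because the card-series events arrive as Pydantic objects rather than plain dicts), and the issues query always spans the whole funnel (first_stage=1, last_stage=len(filter_d.events)) instead of honoring firstStage/lastStage from the payload. A minimal sketch of why the attribute access matters, assuming Pydantic models; the EventSchema name below is illustrative, not the real class:

    from pydantic import BaseModel

    # Illustrative stand-in for the card-series event schema (hypothetical name;
    # the real model lives in schemas.py and carries more fields).
    class EventSchema(BaseModel):
        type: str
        value: list
        operator: str

    stage = EventSchema(type="click", value=["#checkout"], operator="is")

    # Dict-style access fails on a Pydantic model:
    #   stage["value"]  ->  TypeError: 'EventSchema' object is not subscriptable
    # Attribute access, as in the patched get_stages, works:
    print(stage.value, stage.type, stage.operator)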

View file

@@ -457,9 +457,9 @@ def get_stages(stages, rows):
             drop = int(100 * (session_counts[i] - session_counts[i + 1]) / session_counts[i])
         stages_list.append(
-            {"value": stage["value"],
-             "type": stage["type"],
-             "operator": stage["operator"],
+            {"value": stage.value,
+             "type": stage.type,
+             "operator": stage.operator,
              "sessionsCount": session_counts[i + 1],
              "drop_pct": drop,
              "usersCount": users_counts[i + 1],
@@ -587,8 +587,8 @@ def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
     stages_list = get_stages(stages, rows)
     # Obtain the second part of the output
     total_drop_due_to_issues = get_issues(stages, rows,
-                                          first_stage=filter_d.get("firstStage"),
-                                          last_stage=filter_d.get("lastStage"),
+                                          first_stage=1,
+                                          last_stage=len(filter_d.events),
                                           drop_only=True)
     return stages_list, total_drop_due_to_issues
@@ -609,45 +609,3 @@ def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_
     # output['critical_issues_count'] = n_critical_issues
     output = {**output, **issues_dict}
     return output
-
-
-def get_overview(filter_d, project_id, first_stage=None, last_stage=None):
-    output = dict()
-    stages = filter_d["events"]
-    # TODO: handle 1 stage alone
-    if len(stages) == 0:
-        return {"stages": [],
-                "criticalIssuesCount": 0}
-    elif len(stages) == 1:
-        # TODO: count sessions, and users for single stage
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        return output
-    # The result of the multi-stage query
-    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
-    if len(rows) == 0:
-        # PS: not sure what to return if rows are empty
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        output['criticalIssuesCount'] = 0
-        return output
-    # Obtain the first part of the output
-    stages_list = get_stages(stages, rows)
-    # Obtain the second part of the output
-    n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows, first_stage=first_stage,
-                                                                          last_stage=last_stage)
-    output['stages'] = stages_list
-    output['criticalIssuesCount'] = n_critical_issues
-    return output

View file

@@ -761,6 +761,9 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
     def merge_identical_filters(cls, values):
         i = 0
         while i < len(values):
+            if values[i].is_event:
+                i += 1
+                continue
             j = i + 1
             while j < len(values):
                 if values[i].type == values[j].type:
@@ -1024,16 +1027,6 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):
             v["isEvent"] = ProductAnalyticsSelectedEventType.has_value(v["type"])
         return values
 
-    @model_validator(mode="before")
-    def remove_wrong_filter_values(cls, values):
-        for f in values.get("filters", []):
-            vals = []
-            for v in f.get("value", []):
-                if v is not None:
-                    vals.append(v)
-            f["value"] = vals
-        return values
-
    @model_validator(mode="before")
    def __enforce_default(cls, values):
        if values.get("startTimestamp") is None:

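A note on the schema hunk above: the dropped remove_wrong_filter_values hook is a Pydantic v2 "before" model validator, meaning it sees the raw payload and may rewrite it before field validation runs. A standalone sketch of that pattern; class and field names other than filters/value are illustrative:

    from typing import List
    from pydantic import BaseModel, model_validator

    class FilterSchema(BaseModel):  # illustrative stand-in
        type: str
        value: List[str] = []

    class CardFilters(BaseModel):  # illustrative stand-in for CardSessionsSchema
        filters: List[FilterSchema] = []

        # A "before" validator, mirroring the removed hook: drop None entries
        # from every filter's value list before field validation runs.
        @model_validator(mode="before")
        @classmethod
        def drop_none_values(cls, values):
            for f in values.get("filters", []):
                f["value"] = [v for v in f.get("value", []) if v is not None]
            return values

    print(CardFilters(filters=[{"type": "browser", "value": ["Chrome", None]}]))
    # filters=[FilterSchema(type='browser', value=['Chrome'])]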
View file

@@ -463,9 +463,9 @@ def get_stages(stages, rows):
             drop = int(100 * (session_counts[i] - session_counts[i + 1]) / session_counts[i])
         stages_list.append(
-            {"value": stage["value"],
-             "type": stage["type"],
-             "operator": stage["operator"],
+            {"value": stage.value,
+             "type": stage.type,
+             "operator": stage.operator,
              "sessionsCount": session_counts[i + 1],
              "drop_pct": drop,
              "usersCount": users_counts[i + 1],
@@ -593,8 +593,8 @@ def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
     stages_list = get_stages(stages, rows)
     # Obtain the second part of the output
     total_drop_due_to_issues = get_issues(stages, rows,
-                                          first_stage=filter_d.get("firstStage"),
-                                          last_stage=filter_d.get("lastStage"),
+                                          first_stage=1,
+                                          last_stage=len(filter_d.events),
                                           drop_only=True)
     return stages_list, total_drop_due_to_issues
@@ -615,45 +615,3 @@ def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_
     # output['critical_issues_count'] = n_critical_issues
     output = {**output, **issues_dict}
     return output
-
-
-def get_overview(filter_d, project_id, first_stage=None, last_stage=None):
-    output = dict()
-    stages = filter_d["events"]
-    # TODO: handle 1 stage alone
-    if len(stages) == 0:
-        return {"stages": [],
-                "criticalIssuesCount": 0}
-    elif len(stages) == 1:
-        # TODO: count sessions, and users for single stage
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        return output
-    # The result of the multi-stage query
-    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
-    if len(rows) == 0:
-        # PS: not sure what to return if rows are empty
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        output['criticalIssuesCount'] = 0
-        return output
-    # Obtain the first part of the output
-    stages_list = get_stages(stages, rows)
-    # Obtain the second part of the output
-    n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows, first_stage=first_stage,
-                                                                          last_stage=last_stage)
-    output['stages'] = stages_list
-    output['criticalIssuesCount'] = n_critical_issues
-    return output

View file

@@ -462,9 +462,9 @@ def get_stages(stages, rows):
             drop = int(100 * (session_counts[i] - session_counts[i + 1]) / session_counts[i])
         stages_list.append(
-            {"value": stage["value"],
-             "type": stage["type"],
-             "operator": stage["operator"],
+            {"value": stage.value,
+             "type": stage.type,
+             "operator": stage.operator,
              "sessionsCount": session_counts[i + 1],
              "drop_pct": drop,
              "usersCount": users_counts[i + 1],
@@ -592,8 +592,8 @@ def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
     stages_list = get_stages(stages, rows)
    # Obtain the second part of the output
     total_drop_due_to_issues = get_issues(stages, rows,
-                                          first_stage=filter_d.get("firstStage"),
-                                          last_stage=filter_d.get("lastStage"),
+                                          first_stage=1,
+                                          last_stage=len(filter_d.events),
                                           drop_only=True)
     return stages_list, total_drop_due_to_issues
@@ -614,45 +614,3 @@ def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_
     # output['critical_issues_count'] = n_critical_issues
     output = {**output, **issues_dict}
     return output
-
-
-def get_overview(filter_d, project_id, first_stage=None, last_stage=None):
-    output = dict()
-    stages = filter_d["events"]
-    # TODO: handle 1 stage alone
-    if len(stages) == 0:
-        return {"stages": [],
-                "criticalIssuesCount": 0}
-    elif len(stages) == 1:
-        # TODO: count sessions, and users for single stage
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        return output
-    # The result of the multi-stage query
-    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
-    if len(rows) == 0:
-        # PS: not sure what to return if rows are empty
-        output["stages"] = [{
-            "type": stages[0]["type"],
-            "value": stages[0]["value"],
-            "sessionsCount": None,
-            "dropPercentage": None,
-            "usersCount": None
-        }]
-        output['criticalIssuesCount'] = 0
-        return output
-    # Obtain the first part of the output
-    stages_list = get_stages(stages, rows)
-    # Obtain the second part of the output
-    n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows, first_stage=first_stage,
-                                                                          last_stage=last_stage)
-    output['stages'] = stages_list
-    output['criticalIssuesCount'] = n_critical_issues
-    return output