commit a3f7dedb4a
11 changed files with 39 additions and 29 deletions
@@ -121,12 +121,14 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
 WHERE dashboards.project_id = %(projectId)s
 AND dashboard_id = %(dashboard_id)s
 AND (dashboards.user_id = %(userId)s OR is_public)
-RETURNING dashboard_id,name,description,is_public,created_at;"""
+RETURNING dashboard_id,name,description,is_public,created_at"""
 if data.metrics is not None and len(data.metrics) > 0:
 pg_query = f"""WITH dash AS ({pg_query})
 INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
 VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
-RETURNING dash.*;"""
+RETURNING (SELECT dashboard_id FROM dash),(SELECT name FROM dash),
+(SELECT description FROM dash),(SELECT is_public FROM dash),
+(SELECT created_at FROM dash);"""
 for i, m in enumerate(data.metrics):
 params[f"metric_id_{i}"] = m
 # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \
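The rewritten RETURNING clause reads the dashboard row back out of the dash CTE through scalar subselects, because RETURNING on the widget INSERT only exposes the freshly inserted dashboard_widgets row. Below is a minimal, hedged sketch of that UPDATE-in-a-CTE plus INSERT pattern; the simplified schema and the update_dashboard_with_widget helper are illustrative placeholders, not the project's actual tables or API.

    UPDATE_AND_INSERT = """
    WITH dash AS (
        UPDATE dashboards
           SET name = %(name)s
         WHERE dashboard_id = %(dashboard_id)s
     RETURNING dashboard_id, name
    )
    INSERT INTO dashboard_widgets (dashboard_id, metric_id)
    VALUES (%(dashboard_id)s, %(metric_id)s)
    -- RETURNING here only sees the inserted dashboard_widgets row, so the
    -- dashboard columns are read back through scalar subselects on the CTE:
    RETURNING (SELECT dashboard_id FROM dash),
              (SELECT name FROM dash);"""

    def update_dashboard_with_widget(conn, dashboard_id, name, metric_id):
        # conn is an open psycopg2 connection; parameters use the same
        # %(name)s binding style as the query in the hunk above.
        with conn.cursor() as cur:
            cur.execute(UPDATE_AND_INSERT, {"dashboard_id": dashboard_id,
                                            "name": name,
                                            "metric_id": metric_id})
            return cur.fetchone()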
@@ -57,7 +57,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 f_k = f"f_value{i}"
 values = {**values,
 **sh.multi_values(helper.values_for_operator(value=f["value"], op=f["operator"]),
-value_key=f_k)}
+value_key=f_k)}
 if filter_type == schemas.FilterType.user_browser:
 # op = sessions.__get_sql_operator_multiple(f["operator"])
 first_stage_extra_constraints.append(
@@ -166,7 +166,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 continue

 values = {**values, **sh.multi_values(helper.values_for_operator(value=s["value"], op=s["operator"]),
-value_key=f"value{i + 1}")}
+value_key=f"value{i + 1}")}
 if sh.is_negation_operator(op) and i > 0:
 op = sh.reverse_sql_operator(op)
 main_condition = "left_not.session_id ISNULL"
@@ -180,7 +180,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 main_condition = "TRUE"
 else:
 main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
-values=s["value"], value_key=f"value{i + 1}")
+values=s["value"], value_key=f"value{i + 1}")
 n_stages_query.append(f"""
 (SELECT main.session_id,
 {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
@@ -258,7 +258,7 @@ def pearson_corr(x: list, y: list):
 return None, None, False

 if n == 2:
-return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0
+return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True

 xmean = sum(x) / len(x)
 ymean = sum(y) / len(y)
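This fix makes the two-point shortcut return the same 3-tuple shape as the other exits (for example return None, None, False) instead of a 2-tuple that callers could not unpack. A rough, hedged sketch of how such a helper might be shaped, assuming the tuple is (correlation, second statistic, is_valid); it is an illustration, not the project's full pearson_corr.

    import math

    def pearson_corr_sketch(x: list, y: list):
        # Assumed return shape: (correlation, second statistic, is_valid).
        n = len(x)
        if n != len(y) or n < 2:
            return None, None, False
        if n == 2:
            # With two distinct points the correlation is exactly +1 or -1; the
            # commit's fix is that this branch returns three values, not two.
            return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True
        xmean = sum(x) / n
        ymean = sum(y) / n
        num = sum((a - xmean) * (b - ymean) for a, b in zip(x, y))
        den = math.sqrt(sum((a - xmean) ** 2 for a in x) * sum((b - ymean) ** 2 for b in y))
        if den == 0:
            return None, None, False
        return num / den, 1.0, True  # 1.0 is a placeholder for the real second statistic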
@@ -574,8 +574,10 @@ def get_top_insights(filter_d, project_id):
 # Obtain the first part of the output
 stages_list = get_stages(stages, rows)
 # Obtain the second part of the output
-total_drop_due_to_issues = get_issues(stages, rows, first_stage=filter_d.get("firstStage"),
-last_stage=filter_d.get("lastStage"), drop_only=True)
+n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows,
+first_stage=filter_d.get("firstStage"),
+last_stage=filter_d.get("lastStage"),
+drop_only=True)
 return stages_list, total_drop_due_to_issues
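After this change get_issues is treated as returning a 3-tuple, of which get_top_insights still forwards only the drop total. A hedged sketch of the calling convention implied by the hunk; get_issues_sketch and its placeholder fields are illustrative stand-ins, not the real implementation.

    def get_issues_sketch(stages, rows, first_stage=None, last_stage=None, drop_only=False):
        # Assumed return shape implied by the diff:
        # (count of critical issues, issues grouped for the UI, drop attributed to issues).
        n_critical_issues = 0
        issues_dict = {"significant": [], "insignificant": []}
        total_drop_due_to_issues = 0
        return n_critical_issues, issues_dict, total_drop_due_to_issues

    def get_top_insights_sketch(stages, rows, filter_d):
        stages_list = []  # stands in for get_stages(stages, rows)
        n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues_sketch(
            stages, rows,
            first_stage=filter_d.get("firstStage"),
            last_stage=filter_d.get("lastStage"),
            drop_only=True)
        # Only the stage list and the aggregated drop leave this function; the
        # issue count and breakdown are unpacked but unused here.
        return stages_list, total_drop_due_to_issues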
@@ -63,7 +63,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 f_k = f"f_value{i}"
 values = {**values,
 **sh.multi_values(helper.values_for_operator(value=f["value"], op=f["operator"]),
-value_key=f_k)}
+value_key=f_k)}
 if filter_type == schemas.FilterType.user_browser:
 # op = sessions.__get_sql_operator_multiple(f["operator"])
 first_stage_extra_constraints.append(
@@ -172,7 +172,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 continue

 values = {**values, **sh.multi_values(helper.values_for_operator(value=s["value"], op=s["operator"]),
-value_key=f"value{i + 1}")}
+value_key=f"value{i + 1}")}
 if sh.is_negation_operator(op) and i > 0:
 op = sh.reverse_sql_operator(op)
 main_condition = "left_not.session_id ISNULL"
@@ -186,7 +186,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 main_condition = "TRUE"
 else:
 main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
-values=s["value"], value_key=f"value{i + 1}")
+values=s["value"], value_key=f"value{i + 1}")
 n_stages_query.append(f"""
 (SELECT main.session_id,
 {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
@@ -264,7 +264,7 @@ def pearson_corr(x: list, y: list):
 return None, None, False

 if n == 2:
-return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0
+return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True

 xmean = sum(x) / len(x)
 ymean = sum(y) / len(y)
@@ -580,8 +580,10 @@ def get_top_insights(filter_d, project_id):
 # Obtain the first part of the output
 stages_list = get_stages(stages, rows)
 # Obtain the second part of the output
-total_drop_due_to_issues = get_issues(stages, rows, first_stage=filter_d.get("firstStage"),
-last_stage=filter_d.get("lastStage"), drop_only=True)
+n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows,
+first_stage=filter_d.get("firstStage"),
+last_stage=filter_d.get("lastStage"),
+drop_only=True)
 return stages_list, total_drop_due_to_issues
@@ -63,7 +63,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 f_k = f"f_value{i}"
 values = {**values,
 **sh.multi_values(helper.values_for_operator(value=f["value"], op=f["operator"]),
-value_key=f_k)}
+value_key=f_k)}
 if filter_type == schemas.FilterType.user_browser:
 # op = sessions.__get_sql_operator_multiple(f["operator"])
 first_stage_extra_constraints.append(
@@ -172,7 +172,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 continue

 values = {**values, **sh.multi_values(helper.values_for_operator(value=s["value"], op=s["operator"]),
-value_key=f"value{i + 1}")}
+value_key=f"value{i + 1}")}
 if sh.is_negation_operator(op) and i > 0:
 op = sh.reverse_sql_operator(op)
 main_condition = "left_not.session_id ISNULL"
@@ -186,7 +186,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
 main_condition = "TRUE"
 else:
 main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
-values=s["value"], value_key=f"value{i + 1}")
+values=s["value"], value_key=f"value{i + 1}")
 n_stages_query.append(f"""
 (SELECT main.session_id,
 {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
@@ -264,7 +264,7 @@ def pearson_corr(x: list, y: list):
 return None, None, False

 if n == 2:
-return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0
+return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True

 xmean = sum(x) / len(x)
 ymean = sum(y) / len(y)
@@ -580,8 +580,10 @@ def get_top_insights(filter_d, project_id):
 # Obtain the first part of the output
 stages_list = get_stages(stages, rows)
 # Obtain the second part of the output
-total_drop_due_to_issues = get_issues(stages, rows, first_stage=filter_d.get("firstStage"),
-last_stage=filter_d.get("lastStage"), drop_only=True)
+n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows,
+first_stage=filter_d.get("firstStage"),
+last_stage=filter_d.get("lastStage"),
+drop_only=True)
 return stages_list, total_drop_due_to_issues
@@ -71,10 +71,10 @@ spec:
 value: '{{ .Values.global.postgresql.postgresqlPassword }}'
 {{- end}}
 - name: SITE_URL
-value: 'https://{{ .Values.global.domainName }}'
+value: '{{ ternary "https" "http" .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}'
 - name: S3_HOST
 {{- if contains "minio" .Values.global.s3.endpoint }}
-value: 'https://{{ .Values.global.domainName }}:{{ .Values.global.ingress.controller.service.ports.https}}'
+value: '{{ .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else}}
 value: '{{ .Values.global.s3.endpoint }}'
 {{- end}}
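These chart templates now derive the URL scheme and ingress port from .Values.global.ORSecureAccess using Helm's ternary function, which returns its first argument when the trailing boolean is true and its second otherwise. The Python sketch below only mirrors that intended scheme/port selection for illustration, keeping to the language of the rest of this commit; the function and parameter names are made up, not chart values.

    def site_and_s3_urls(domain, https_port, http_port, secure_access=True):
        # Equivalent of `ternary "https" "http" .Values.global.ORSecureAccess`
        # and of picking the https or http ingress port with the same flag.
        scheme = "https" if secure_access else "http"
        port = https_port if secure_access else http_port
        return f"{scheme}://{domain}", f"{scheme}://{domain}:{port}"

    # With the flag off, both URLs degrade to plain http:
    print(site_and_s3_urls("openreplay.example.com", 443, 80, secure_access=False))
    # ('http://openreplay.example.com', 'http://openreplay.example.com:80')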
@@ -83,7 +83,7 @@ spec:
 - name: ASSETS_ORIGIN
 {{- if contains "minio" .Values.global.s3.endpoint }}
 # Local minio Installation
-value: 'https://{{ .Values.global.domainName }}:{{.Values.global.ingress.controller.service.ports.https}}/{{.Values.global.s3.assetsBucket}}'
+value: '{{ .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else if contains "amazonaws.com" .Values.global.s3.endpoint }}
 # AWS S3
 # Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter
@@ -51,7 +51,7 @@ spec:
 value: "{{ .Values.global.s3.region }}"
 - name: S3_HOST
 {{- if contains "minio" .Values.global.s3.endpoint }}
-value: 'https://{{ .Values.global.domainName }}:{{ .Values.global.ingress.controller.service.ports.https}}'
+value: '{{ .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else}}
 value: '{{ .Values.global.s3.endpoint }}'
 {{- end}}
@@ -81,10 +81,10 @@ spec:
 value: '{{ .Values.global.postgresql.postgresqlPassword }}'
 {{- end}}
 - name: SITE_URL
-value: 'https://{{ .Values.global.domainName }}'
+value: '{{ ternary "https" "http" .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}'
 - name: S3_HOST
 {{- if contains "minio" .Values.global.s3.endpoint }}
-value: 'https://{{ .Values.global.domainName }}:{{ .Values.global.ingress.controller.service.ports.https}}'
+value: '{{ ternary "https" "http" .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else}}
 value: '{{ .Values.global.s3.endpoint }}'
 {{- end}}
@@ -90,7 +90,7 @@ spec:
 - name: ASSETS_ORIGIN
 {{- if contains "minio" .Values.global.s3.endpoint }}
 # Local minio Installation
-value: 'https://{{ .Values.global.domainName }}:{{.Values.global.ingress.controller.service.ports.https}}/{{.Values.global.s3.assetsBucket}}'
+value: '{{ .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else if contains "amazonaws.com" .Values.global.s3.endpoint }}
 # AWS S3
 # Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter
@@ -59,7 +59,7 @@ spec:
 - name: ASSETS_ORIGIN
 {{- if contains "minio" .Values.global.s3.endpoint }}
 # Local minio Installation
-value: 'https://{{ .Values.global.domainName }}:{{.Values.global.ingress.controller.service.ports.https}}/{{.Values.global.s3.assetsBucket}}'
+value: '{{ .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
 {{- else if contains "amazonaws.com" .Values.global.s3.endpoint }}
 # AWS S3
 # Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter
@@ -37,5 +37,7 @@ global:
 vault: *vault
 redis: *redis
 clusterDomain: "svc.cluster.local"
 # In case you've http proxy to access internet.
 env: {}
+# If you're accessing OpenReplay with http, then update the value to http
+ORSecureAccess: true