diff --git a/.github/workflows/workers-ee.yaml b/.github/workflows/workers-ee.yaml
index 35580b5a9..c573dff26 100644
--- a/.github/workflows/workers-ee.yaml
+++ b/.github/workflows/workers-ee.yaml
@@ -86,7 +86,11 @@ jobs:
;;
esac
- [[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
+ if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
+ echo "Nothing to build here"
+ touch /tmp/nothing-to-build-here
+ exit 0
+ fi
#
# Pushing image to registry
#
@@ -94,7 +98,7 @@ jobs:
for image in $(cat /tmp/images_to_build.txt);
do
echo "Bulding $image"
- PUSH_IMAGE=0 bash -x ./build.sh skip $image
+ PUSH_IMAGE=0 bash -x ./build.sh ee $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
@@ -105,7 +109,7 @@ jobs:
} && {
echo "Skipping Security Checks"
}
- PUSH_IMAGE=1 bash -x ./build.sh skip $image
+ PUSH_IMAGE=1 bash -x ./build.sh ee $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done
@@ -118,6 +122,7 @@ jobs:
# Deploying image to environment.
#
set -x
+ [[ -f /tmp/nothing-to-build-here ]] && exit 0
cd scripts/helmcharts/
## Update secerts
@@ -156,16 +161,16 @@ jobs:
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
- - name: Alert slack
- if: ${{ failure() }}
- uses: rtCamp/action-slack-notify@v2
- env:
- SLACK_CHANNEL: ee
- SLACK_TITLE: "Failed ${{ github.workflow }}"
- SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
- SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
- SLACK_USERNAME: "OR Bot"
- SLACK_MESSAGE: 'Build failed :bomb:'
+ #- name: Alert slack
+ # if: ${{ failure() }}
+ # uses: rtCamp/action-slack-notify@v2
+ # env:
+ # SLACK_CHANNEL: ee
+ # SLACK_TITLE: "Failed ${{ github.workflow }}"
+ # SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+ # SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+ # SLACK_USERNAME: "OR Bot"
+ # SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
diff --git a/.github/workflows/workers.yaml b/.github/workflows/workers.yaml
index 341a196ad..e222e00fb 100644
--- a/.github/workflows/workers.yaml
+++ b/.github/workflows/workers.yaml
@@ -86,7 +86,11 @@ jobs:
;;
esac
- [[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
+ if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
+ echo "Nothing to build here"
+ touch /tmp/nothing-to-build-here
+ exit 0
+ fi
#
# Pushing image to registry
#
@@ -116,6 +120,8 @@ jobs:
#
# Deploying image to environment.
#
+ set -x
+ [[ -f /tmp/nothing-to-build-here ]] && exit 0
cd scripts/helmcharts/
## Update secerts
diff --git a/README.md b/README.md
index 6a644c0bc..363c64d1c 100644
--- a/README.md
+++ b/README.md
@@ -91,9 +91,3 @@ Check out our [roadmap](https://www.notion.so/openreplay/Roadmap-889d2c3d968b478
## License
This monorepo uses several licenses. See [LICENSE](/LICENSE) for more details.
-
-## Contributors
-
-
-
-
diff --git a/api/chalicelib/core/alerts_processor.py b/api/chalicelib/core/alerts_processor.py
index 2ed9105b2..76ae5c615 100644
--- a/api/chalicelib/core/alerts_processor.py
+++ b/api/chalicelib/core/alerts_processor.py
@@ -199,7 +199,8 @@ def process():
logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
notifications.append(generate_notification(alert, result))
except Exception as e:
- logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
+ logging.error(
+ f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
logging.error(query)
logging.error(e)
cur = cur.recreate(rollback=True)
@@ -212,12 +213,22 @@ def process():
alerts.process_notifications(notifications)
+def __format_value(x):
+ if x % 1 == 0:
+ x = int(x)
+ else:
+ x = round(x, 2)
+ return f"{x:,}"
+
+
def generate_notification(alert, result):
+ left = __format_value(result['value'])
+ right = __format_value(alert['query']['right'])
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
- "description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
+ "description": f"has been triggered, {alert['query']['left']} = {left} ({alert['query']['operator']} {right}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
diff --git a/api/chalicelib/core/custom_metrics.py b/api/chalicelib/core/custom_metrics.py
index 29c4b6fa9..691b8e1ba 100644
--- a/api/chalicelib/core/custom_metrics.py
+++ b/api/chalicelib/core/custom_metrics.py
@@ -266,7 +266,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
"metric_type": data.metric_type, "metric_of": data.metric_of,
- "metric_value": data.metric_value, "metric_format": data.metric_format}
+ "metric_value": data.metric_value, "metric_format": data.metric_format,
+ "config": json.dumps(data.config.dict())}
for i, s in enumerate(data.series):
prefix = "u_"
if s.index is None:
@@ -316,7 +317,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
view_type= %(view_type)s, metric_type= %(metric_type)s,
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
metric_format= %(metric_format)s,
- edited_at = timezone('utc'::text, now())
+ edited_at = timezone('utc'::text, now()),
+ default_config = %(config)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
@@ -392,7 +394,7 @@ def get(metric_id, project_id, user_id, flatten=True):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
- """SELECT *
+ """SELECT *, default_config AS config
FROM metrics
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
@@ -443,7 +445,7 @@ def get_with_template(metric_id, project_id, user_id, include_dashboard=True):
) AS connected_dashboards ON (TRUE)"""
cur.execute(
cur.mogrify(
- f"""SELECT *
+ f"""SELECT *, default_config AS config
FROM metrics
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
diff --git a/api/chalicelib/core/dashboards.py b/api/chalicelib/core/dashboards.py
index 9d1dc4c81..ac98b44e7 100644
--- a/api/chalicelib/core/dashboards.py
+++ b/api/chalicelib/core/dashboards.py
@@ -111,6 +111,8 @@ def get_dashboard(project_id, user_id, dashboard_id):
for w in row["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
+ w["config"]["col"] = w["default_config"]["col"]
+ w["config"]["row"] = w["default_config"]["row"]
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
return helper.dict_to_camel_case(row)
diff --git a/api/chalicelib/core/metrics.py b/api/chalicelib/core/metrics.py
index bf388c093..b25b441ed 100644
--- a/api/chalicelib/core/metrics.py
+++ b/api/chalicelib/core/metrics.py
@@ -419,7 +419,7 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
chart=True, data=args)
pg_sub_query_chart.append("resources.type = 'img'")
- pg_sub_query_chart.append("resources.url = top_img.url")
+ pg_sub_query_chart.append("resources.url_hostpath = top_img.url_hostpath")
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
chart=False, data=args)
@@ -431,13 +431,13 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT *
- FROM (SELECT resources.url,
+ FROM (SELECT resources.url_hostpath,
COALESCE(AVG(resources.duration), 0) AS avg_duration,
COUNT(resources.session_id) AS sessions_count
FROM events.resources
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_subset)}
- GROUP BY resources.url
+ GROUP BY resources.url_hostpath
ORDER BY avg_duration DESC
LIMIT 10) AS top_img
LEFT JOIN LATERAL (
@@ -485,13 +485,13 @@ def get_performance(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTi
if resources and len(resources) > 0:
for r in resources:
if r["type"] == "IMG":
- img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
+ img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
elif r["type"] == "LOCATION":
location_constraints.append(f"pages.path = %(val_{len(location_constraints)})s")
location_constraints_vals["val_" + str(len(location_constraints) - 1)] = r['value']
else:
- request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
+ request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp}
@@ -627,12 +627,12 @@ def search(text, resource_type, project_id, performance=False, pages_only=False,
pg_sub_query.append("url_hostpath ILIKE %(value)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT key, value
- FROM ( SELECT DISTINCT ON (url) ROW_NUMBER() OVER (PARTITION BY type ORDER BY url) AS r,
- url AS value,
+ FROM ( SELECT DISTINCT ON (url_hostpath) ROW_NUMBER() OVER (PARTITION BY type ORDER BY url_hostpath) AS r,
+ url_hostpath AS value,
type AS key
FROM events.resources INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
- ORDER BY url, type ASC) AS ranked_values
+ ORDER BY url_hostpath, type ASC) AS ranked_values
WHERE ranked_values.r<=5;"""
cur.execute(cur.mogrify(pg_query, {"project_id": project_id, "value": helper.string_to_sql_like(text)}))
rows = cur.fetchall()
@@ -893,7 +893,7 @@ def get_resources_loading_time(project_id, startTimestamp=TimeUTC.now(delta_days
if type is not None:
pg_sub_query_subset.append(f"resources.type = '{__get_resource_db_type_from_type(type)}'")
if url is not None:
- pg_sub_query_subset.append(f"resources.url = %(value)s")
+ pg_sub_query_subset.append(f"resources.url_hostpath = %(value)s")
with pg_client.PostgresClient() as cur:
pg_query = f"""WITH resources AS (SELECT resources.duration, timestamp
@@ -1009,7 +1009,7 @@ def get_slowest_resources(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
ORDER BY avg DESC
LIMIT 10) AS main_list
INNER JOIN LATERAL (
- SELECT url, type
+ SELECT url_hostpath AS url, type
FROM events.resources
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
diff --git a/api/chalicelib/core/projects.py b/api/chalicelib/core/projects.py
index 18e80944e..10d1e7aee 100644
--- a/api/chalicelib/core/projects.py
+++ b/api/chalicelib/core/projects.py
@@ -76,19 +76,21 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
rows = cur.fetchall()
# if recorded is requested, check if it was saved or computed
if recorded:
- for r in rows:
+ u_values = []
+ params = {}
+ for i, r in enumerate(rows):
if r["first_recorded_session_at"] is None:
- extra_update = ""
- if r["recorded"]:
- extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
- query = cur.mogrify(f"""UPDATE public.projects
- SET sessions_last_check_at=(now() at time zone 'utc')
- {extra_update}
- WHERE project_id=%(project_id)s""",
- {"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
- cur.execute(query)
+ u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
+ params[f"project_id_{i}"] = r["project_id"]
+ params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
r.pop("first_recorded_session_at")
r.pop("first_recorded")
+ if len(u_values) > 0:
+ query = cur.mogrify(f"""UPDATE public.projects
+ SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
+ FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
+ WHERE projects.project_id=u.project_id;""", params)
+ cur.execute(query)
if recording_state and len(rows) > 0:
project_ids = [f'({r["project_id"]})' for r in rows]
diff --git a/api/chalicelib/core/sessions.py b/api/chalicelib/core/sessions.py
index fcea8621d..91efb967f 100644
--- a/api/chalicelib/core/sessions.py
+++ b/api/chalicelib/core/sessions.py
@@ -177,7 +177,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):
# This function executes the query and return result
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
- error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
+ error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):
if data.bookmarked:
data.startDate, data.endDate = sessions_favorite.get_start_end_timestamp(project_id, user_id)
@@ -185,9 +185,11 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
favorite_only=data.bookmarked, issue=issue, project_id=project_id,
user_id=user_id)
if data.limit is not None and data.page is not None:
+ full_args["sessions_limit"] = data.limit
full_args["sessions_limit_s"] = (data.page - 1) * data.limit
full_args["sessions_limit_e"] = data.page * data.limit
else:
+ full_args["sessions_limit"] = 200
full_args["sessions_limit_s"] = 1
full_args["sessions_limit_e"] = 200
@@ -235,6 +237,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
GROUP BY user_id
) AS users_sessions;""",
full_args)
+ elif ids_only:
+ main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
+ {query_part}
+ ORDER BY s.session_id desc
+ LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
+ full_args)
else:
if data.order is None:
data.order = schemas.SortOrderType.desc
@@ -242,7 +250,6 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
sort = helper.key_to_snake_case(data.sort)
-
meta_keys = metadata.get(project_id=project_id)
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
COALESCE(JSONB_AGG(full_sessions)
@@ -266,7 +273,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
print(data.json())
print("--------------------")
raise err
- if errors_only:
+ if errors_only or ids_only:
return helper.list_to_camel_case(cur.fetchall())
sessions = cur.fetchone()
diff --git a/api/chalicelib/core/sessions_notes.py b/api/chalicelib/core/sessions_notes.py
index 661c94efd..3ef4ccc21 100644
--- a/api/chalicelib/core/sessions_notes.py
+++ b/api/chalicelib/core/sessions_notes.py
@@ -140,7 +140,9 @@ def share_to_slack(tenant_id, user_id, project_id, note_id, webhook_id):
note = get_note(tenant_id=tenant_id, project_id=project_id, user_id=user_id, note_id=note_id, share=user_id)
if note is None:
return {"errors": ["Note not found"]}
- session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}")
+ session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}?note={note['noteId']}")
+ if note["timestamp"] > 0:
+ session_url += f"&jumpto={note['timestamp']}"
title = f"<{session_url}|Note for session {note['sessionId']}>"
blocks = [{"type": "section",
diff --git a/api/chalicelib/core/significance.py b/api/chalicelib/core/significance.py
index 2abd87cf7..c4a4fcaac 100644
--- a/api/chalicelib/core/significance.py
+++ b/api/chalicelib/core/significance.py
@@ -181,9 +181,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
values=s["value"], value_key=f"value{i + 1}")
n_stages_query.append(f"""
(SELECT main.session_id,
- {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp,
- '{event_type}' AS type,
- '{s["operator"]}' AS operator
+ {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
FROM {next_table} AS main {" ".join(extra_from)}
WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
{f"AND main.session_id=T1.session_id" if i > 0 else ""}
@@ -191,30 +189,34 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
{(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
{(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
GROUP BY main.session_id)
- AS T{i + 1} {"USING (session_id)" if i > 0 else ""}
+ AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
- if len(n_stages_query) == 0:
+ n_stages = len(n_stages_query)
+ if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
n_stages_query += ") AS stages_t"
n_stages_query = f"""
- SELECT stages_and_issues_t.*, sessions.user_uuid FROM (
+ SELECT stages_and_issues_t.*, sessions.user_uuid
+ FROM (
SELECT * FROM (
- SELECT * FROM
- {n_stages_query}
+ SELECT T1.session_id, {",".join([f"stage{i + 1}_timestamp" for i in range(n_stages)])}
+ FROM {n_stages_query}
LEFT JOIN LATERAL
- ( SELECT ISE.session_id,
- ISS.type as issue_type,
+ ( SELECT ISS.type as issue_type,
ISE.timestamp AS issue_timestamp,
- ISS.context_string as issue_context,
+ COALESCE(ISS.context_string,'') as issue_context,
ISS.issue_id as issue_id
FROM events_common.issues AS ISE INNER JOIN issues AS ISS USING (issue_id)
WHERE ISE.timestamp >= stages_t.stage1_timestamp
AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
AND ISS.project_id=%(project_id)s
+ AND ISE.session_id = stages_t.session_id
+ AND ISS.type!='custom' -- ignore custom issues because they are massive
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
- ) AS issues_t USING (session_id)
+ LIMIT 10 -- remove the limit to get exact stats
+ ) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
@@ -297,7 +299,21 @@ def pearson_corr(x: list, y: list):
return r, confidence, False
-def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_with_context, first_stage, last_stage):
+# def tuple_or(t: tuple):
+# x = 0
+# for el in t:
+# x |= el # | is for bitwise OR
+# return x
+#
+# The following function is a correct optimization of the previous function because t is a list of 0,1
+def tuple_or(t: tuple):
+ for el in t:
+ if el > 0:
+ return 1
+ return 0
+
+
+def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
@@ -316,12 +332,6 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
transitions = []
n_sess_affected = 0
errors = {}
- for issue in all_issues_with_context:
- split = issue.split('__^__')
- errors[issue] = {
- "errors": [],
- "issue_type": split[0],
- "context": split[1]}
for row in rows:
t = 0
@@ -329,38 +339,26 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
last_ts = row[f'stage{last_stage}_timestamp']
if first_ts is None:
continue
- elif first_ts is not None and last_ts is not None:
+ elif last_ts is not None:
t = 1
transitions.append(t)
ic_present = False
- for issue_type_with_context in errors:
+ for error_id in all_issues:
+ if error_id not in errors:
+ errors[error_id] = []
ic = 0
- issue_type = errors[issue_type_with_context]["issue_type"]
- context = errors[issue_type_with_context]["context"]
- if row['issue_type'] is not None:
+ row_issue_id = row['issue_id']
+ if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
- context_in_row = row['issue_context'] if row['issue_context'] is not None else ''
- if issue_type == row['issue_type'] and context == context_in_row:
+ if error_id == row_issue_id:
ic = 1
ic_present = True
- errors[issue_type_with_context]["errors"].append(ic)
+ errors[error_id].append(ic)
if ic_present and t:
n_sess_affected += 1
- # def tuple_or(t: tuple):
- # x = 0
- # for el in t:
- # x |= el
- # return x
- def tuple_or(t: tuple):
- for el in t:
- if el > 0:
- return 1
- return 0
-
- errors = {key: errors[key]["errors"] for key in errors}
all_errors = [tuple_or(t) for t in zip(*errors.values())]
return transitions, errors, all_errors, n_sess_affected
@@ -376,10 +374,9 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
affected_users = defaultdict(lambda: set())
affected_sessions = defaultdict(lambda: set())
- contexts = defaultdict(lambda: None)
+ all_issues = {}
n_affected_users_dict = defaultdict(lambda: None)
n_affected_sessions_dict = defaultdict(lambda: None)
- all_issues_with_context = set()
n_issues_dict = defaultdict(lambda: 0)
issues_by_session = defaultdict(lambda: 0)
@@ -395,15 +392,13 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
# check that the issue exists and belongs to subfunnel:
if iss is not None and (row[f'stage{last_stage}_timestamp'] is None or
(row[f'stage{first_stage}_timestamp'] < iss_ts < row[f'stage{last_stage}_timestamp'])):
- context_string = row['issue_context'] if row['issue_context'] is not None else ''
- issue_with_context = iss + '__^__' + context_string
- contexts[issue_with_context] = {"context": context_string, "id": row["issue_id"]}
- all_issues_with_context.add(issue_with_context)
- n_issues_dict[issue_with_context] += 1
+ if row["issue_id"] not in all_issues:
+ all_issues[row["issue_id"]] = {"context": row['issue_context'], "issue_type": row["issue_type"]}
+ n_issues_dict[row["issue_id"]] += 1
if row['user_uuid'] is not None:
- affected_users[issue_with_context].add(row['user_uuid'])
+ affected_users[row["issue_id"]].add(row['user_uuid'])
- affected_sessions[issue_with_context].add(row['session_id'])
+ affected_sessions[row["issue_id"]].add(row['session_id'])
issues_by_session[row[f'session_id']] += 1
if len(affected_users) > 0:
@@ -414,29 +409,28 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
n_affected_sessions_dict.update({
iss: len(affected_sessions[iss]) for iss in affected_sessions
})
- return all_issues_with_context, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict, contexts
+ return all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
- for ind, row in enumerate(rows):
+ for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
session_counts[i].add(row[f"session_id"])
+
session_counts = {i: len(session_counts[i]) for i in session_counts}
return session_counts
def count_users(rows, n_stages):
- users_in_stages = defaultdict(lambda: set())
-
- for ind, row in enumerate(rows):
+ users_in_stages = {i: set() for i in range(1, n_stages + 1)}
+ for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
users_in_stages[i].add(row["user_uuid"])
users_count = {i: len(users_in_stages[i]) for i in range(1, n_stages + 1)}
-
return users_count
@@ -489,18 +483,18 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
last_stage = n_stages
n_critical_issues = 0
- issues_dict = dict({"significant": [],
- "insignificant": []})
+ issues_dict = {"significant": [],
+ "insignificant": []}
session_counts = count_sessions(rows, n_stages)
drop = session_counts[first_stage] - session_counts[last_stage]
- all_issues_with_context, n_issues_dict, affected_users_dict, affected_sessions, contexts = get_affected_users_for_all_issues(
+ all_issues, n_issues_dict, affected_users_dict, affected_sessions = get_affected_users_for_all_issues(
rows, first_stage, last_stage)
transitions, errors, all_errors, n_sess_affected = get_transitions_and_issues_of_each_type(rows,
- all_issues_with_context,
+ all_issues,
first_stage, last_stage)
- # print("len(transitions) =", len(transitions))
+ del rows
if any(all_errors):
total_drop_corr, conf, is_sign = pearson_corr(transitions, all_errors)
@@ -513,33 +507,35 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
if drop_only:
return total_drop_due_to_issues
- for issue in all_issues_with_context:
+ for issue_id in all_issues:
- if not any(errors[issue]):
+ if not any(errors[issue_id]):
continue
- r, confidence, is_sign = pearson_corr(transitions, errors[issue])
+ r, confidence, is_sign = pearson_corr(transitions, errors[issue_id])
if r is not None and drop is not None and is_sign:
- lost_conversions = int(r * affected_sessions[issue])
+ lost_conversions = int(r * affected_sessions[issue_id])
else:
lost_conversions = None
if r is None:
r = 0
- split = issue.split('__^__')
issues_dict['significant' if is_sign else 'insignificant'].append({
- "type": split[0],
- "title": helper.get_issue_title(split[0]),
- "affected_sessions": affected_sessions[issue],
- "unaffected_sessions": session_counts[1] - affected_sessions[issue],
+ "type": all_issues[issue_id]["issue_type"],
+ "title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
+ "affected_sessions": affected_sessions[issue_id],
+ "unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
"lost_conversions": lost_conversions,
- "affected_users": affected_users_dict[issue],
+ "affected_users": affected_users_dict[issue_id],
"conversion_impact": round(r * 100),
- "context_string": contexts[issue]["context"],
- "issue_id": contexts[issue]["id"]
+ "context_string": all_issues[issue_id]["context"],
+ "issue_id": issue_id
})
if is_sign:
- n_critical_issues += n_issues_dict[issue]
+ n_critical_issues += n_issues_dict[issue_id]
+ # To limit the number of returned issues to the frontend
+ issues_dict["significant"] = issues_dict["significant"][:20]
+ issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues
diff --git a/api/chalicelib/core/sourcemaps.py b/api/chalicelib/core/sourcemaps.py
index a2197e0c7..844926ffa 100644
--- a/api/chalicelib/core/sourcemaps.py
+++ b/api/chalicelib/core/sourcemaps.py
@@ -70,7 +70,7 @@ def format_payload(p, truncate_to_first=False):
def url_exists(url):
try:
r = requests.head(url, allow_redirects=False)
- return r.status_code == 200 and r.headers.get("Content-Type") != "text/html"
+ return r.status_code == 200 and "text/html" not in r.headers.get("Content-Type", "")
except Exception as e:
print(f"!! Issue checking if URL exists: {url}")
print(e)
diff --git a/api/chalicelib/utils/jira_client.py b/api/chalicelib/utils/jira_client.py
index a820d4aa9..ee8196a46 100644
--- a/api/chalicelib/utils/jira_client.py
+++ b/api/chalicelib/utils/jira_client.py
@@ -242,7 +242,7 @@ class JiraManager:
def get_issue_types(self):
try:
- types = self._jira.issue_types()
+ types = self._jira.project(self._config['JIRA_PROJECT_ID']).issueTypes
except JIRAError as e:
self.retries -= 1
if (e.status_code // 100) == 4 and self.retries > 0:
diff --git a/api/requirements-alerts.txt b/api/requirements-alerts.txt
index b30e65988..ff36f3099 100644
--- a/api/requirements-alerts.txt
+++ b/api/requirements-alerts.txt
@@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.12
-boto3==1.26.4
+boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
-elasticsearch==8.5.0
+elasticsearch==8.5.1
jira==3.4.1
-fastapi==0.86.0
-uvicorn[standard]==0.19.0
+fastapi==0.87.0
+uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.10.2
-apscheduler==3.9.1
\ No newline at end of file
+apscheduler==3.9.1.post1
\ No newline at end of file
diff --git a/api/requirements.txt b/api/requirements.txt
index b30e65988..ff36f3099 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.12
-boto3==1.26.4
+boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
-elasticsearch==8.5.0
+elasticsearch==8.5.1
jira==3.4.1
-fastapi==0.86.0
-uvicorn[standard]==0.19.0
+fastapi==0.87.0
+uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.10.2
-apscheduler==3.9.1
\ No newline at end of file
+apscheduler==3.9.1.post1
\ No newline at end of file
diff --git a/api/routers/core.py b/api/routers/core.py
index 80f2b6296..7ee8364e7 100644
--- a/api/routers/core.py
+++ b/api/routers/core.py
@@ -56,6 +56,14 @@ def sessions_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchem
return {'data': data}
+@app.post('/{projectId}/sessions/search/ids', tags=["sessions"])
+@app.post('/{projectId}/sessions/search2/ids', tags=["sessions"])
+def session_ids_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
+ context: schemas.CurrentContext = Depends(OR_context)):
+ data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id, ids_only=True)
+ return {'data': data}
+
+
@app.get('/{projectId}/events/search', tags=["events"])
def events_search(projectId: int, q: str,
type: Union[schemas.FilterType, schemas.EventType,
diff --git a/api/schemas.py b/api/schemas.py
index f1f3d9cb7..7e990bcb8 100644
--- a/api/schemas.py
+++ b/api/schemas.py
@@ -874,14 +874,14 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
class CustomMetricsConfigSchema(BaseModel):
- col: Optional[int] = Field(default=2)
+ col: Optional[int] = Field(...)
row: Optional[int] = Field(default=2)
position: Optional[int] = Field(default=0)
class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
- config: CustomMetricsConfigSchema = Field(default=CustomMetricsConfigSchema())
+ config: CustomMetricsConfigSchema = Field(...)
@root_validator(pre=True)
def transform_series(cls, values):
diff --git a/backend/Dockerfile b/backend/Dockerfile
index 4e0064e9d..0d7cad075 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -1,6 +1,6 @@
FROM golang:1.18-alpine3.15 AS prepare
-RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
+RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
WORKDIR /root
@@ -15,11 +15,11 @@ COPY pkg pkg
COPY internal internal
ARG SERVICE_NAME
-RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags dynamic openreplay/backend/cmd/$SERVICE_NAME
FROM alpine AS entrypoint
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
RUN adduser -u 1001 openreplay -D
ENV TZ=UTC \
@@ -29,6 +29,18 @@ ENV TZ=UTC \
UAPARSER_FILE=/home/openreplay/regexes.yaml \
HTTP_PORT=8080 \
KAFKA_USE_SSL=true \
+ # KAFKA_USE_KERBEROS should be set true if you wish to use Kerberos auth for Kafka
+ KAFKA_USE_KERBEROS=false \
+ # KERBEROS_SERVICE_NAME is the primary name of the Brokers configured in the Broker JAAS file
+ KERBEROS_SERVICE_NAME="" \
+ # KERBEROS_PRINCIPAL is this client's principal name
+ KERBEROS_PRINCIPAL="" \
+ # KERBEROS_KEYTAB_LOCATION is the absolute path to the keytab to be used for authentication
+ KERBEROS_KEYTAB_LOCATION="" \
+ # KAFKA_SSL_KEY is the absolute path to the CA cert for verifying the broker's key
+ KAFKA_SSL_KEY="" \
+ # KAFKA_SSL_CERT is a CA cert string (PEM format) for verifying the broker's key
+ KAFKA_SSL_CERT="" \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
REDIS_STREAMS_MAX_LEN=10000 \
TOPIC_RAW_WEB=raw \
diff --git a/backend/Dockerfile.bundle b/backend/Dockerfile.bundle
index 407a7b9d8..19c3b325c 100644
--- a/backend/Dockerfile.bundle
+++ b/backend/Dockerfile.bundle
@@ -1,6 +1,6 @@
FROM golang:1.18-alpine3.15 AS prepare
-RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
+RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl-gssapi cyrus-sasl-devel
WORKDIR /root
@@ -14,11 +14,11 @@ COPY cmd cmd
COPY pkg pkg
COPY internal internal
-RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/cmd/$name; done
+RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags dynamic openreplay/backend/cmd/$name; done
FROM alpine AS entrypoint
#FROM pygmy/alpine-tini:latest
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl-gssapi cyrus-sasl-devel pkgconf
ENV TZ=UTC \
FS_ULIMIT=1000 \
@@ -28,6 +28,18 @@ ENV TZ=UTC \
HTTP_PORT=80 \
BEACON_SIZE_LIMIT=7000000 \
KAFKA_USE_SSL=true \
+ # KAFKA_USE_KERBEROS should be set true if you wish to use Kerberos auth for Kafka
+ KAFKA_USE_KERBEROS=false \
+ # KERBEROS_SERVICE_NAME is the primary name of the Brokers configured in the Broker JAAS file
+ KERBEROS_SERVICE_NAME="" \
+ # KERBEROS_PRINCIPAL is this client's principal name
+ KERBEROS_PRINCIPAL="" \
+ # KERBEROS_KEYTAB_LOCATION is the absolute path to the keytab to be used for authentication
+ KERBEROS_KEYTAB_LOCATION="" \
+ # KAFKA_SSL_KEY is the absolute path to the CA cert for verifying the broker's key
+ KAFKA_SSL_KEY="" \
+ # KAFKA_SSL_CERT is a CA cert string (PEM format) for verifying the broker's key
+ KAFKA_SSL_CERT="" \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
REDIS_STREAMS_MAX_LEN=3000 \
TOPIC_RAW_WEB=raw \
diff --git a/backend/cmd/assets/main.go b/backend/cmd/assets/main.go
index 220300e74..5fdc85107 100644
--- a/backend/cmd/assets/main.go
+++ b/backend/cmd/assets/main.go
@@ -73,6 +73,8 @@ func main() {
log.Printf("Error while caching: %v", err)
case <-tick:
cacher.UpdateTimeouts()
+ case msg := <-msgConsumer.Rebalanced():
+ log.Println(msg)
default:
if !cacher.CanCache() {
continue
diff --git a/backend/cmd/db/main.go b/backend/cmd/db/main.go
index c9916b03d..8db029394 100644
--- a/backend/cmd/db/main.go
+++ b/backend/cmd/db/main.go
@@ -163,6 +163,8 @@ func main() {
os.Exit(0)
case <-commitTick:
commitDBUpdates()
+ case msg := <-consumer.Rebalanced():
+ log.Println(msg)
default:
// Handle new message from queue
if err := consumer.ConsumeNext(); err != nil {
diff --git a/backend/cmd/db/values.yaml b/backend/cmd/db/values.yaml
deleted file mode 100644
index 2c0f0e7f3..000000000
--- a/backend/cmd/db/values.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-chalice:
- env:
- jwt_secret: SetARandomStringHere
-clickhouse:
- enabled: false
-fromVersion: v1.6.0
-global:
- domainName: openreplay.local
- email:
- emailFrom: OpenReplay
- emailHost: ""
- emailPassword: ""
- emailPort: "587"
- emailSslCert: ""
- emailSslKey: ""
- emailUseSsl: "false"
- emailUseTls: "true"
- emailUser: ""
- enterpriseEditionLicense: ""
- ingress:
- controller:
- config:
- enable-real-ip: true
- force-ssl-redirect: false
- max-worker-connections: 0
- proxy-body-size: 10m
- ssl-redirect: false
- extraArgs:
- default-ssl-certificate: app/openreplay-ssl
- ingressClass: openreplay
- ingressClassResource:
- name: openreplay
- service:
- externalTrafficPolicy: Local
- kafka:
- kafkaHost: kafka.db.svc.cluster.local
- kafkaPort: "9092"
- kafkaUseSsl: "false"
- zookeeperHost: databases-zookeeper.svc.cluster.local
- zookeeperNonTLSPort: 2181
- postgresql:
- postgresqlDatabase: postgres
- postgresqlHost: postgresql.db.svc.cluster.local
- postgresqlPassword: changeMePassword
- postgresqlPort: "5432"
- postgresqlUser: postgres
- redis:
- redisHost: redis-master.db.svc.cluster.local
- redisPort: "6379"
- s3:
- accessKey: changeMeMinioAccessKey
- assetsBucket: sessions-assets
- endpoint: http://minio.db.svc.cluster.local:9000
- recordingsBucket: mobs
- region: us-east-1
- secretKey: changeMeMinioPassword
- sourcemapsBucket: sourcemaps
-ingress-nginx:
- controller:
- config:
- enable-real-ip: true
- force-ssl-redirect: false
- max-worker-connections: 0
- proxy-body-size: 10m
- ssl-redirect: false
- extraArgs:
- default-ssl-certificate: app/openreplay-ssl
- ingressClass: openreplay
- ingressClassResource:
- name: openreplay
- service:
- externalTrafficPolicy: Local
-kafka:
- kafkaHost: kafka.db.svc.cluster.local
- kafkaPort: "9092"
- kafkaUseSsl: "false"
- zookeeperHost: databases-zookeeper.svc.cluster.local
- zookeeperNonTLSPort: 2181
-minio:
- global:
- minio:
- accessKey: changeMeMinioAccessKey
- secretKey: changeMeMinioPassword
-postgresql:
- postgresqlDatabase: postgres
- postgresqlHost: postgresql.db.svc.cluster.local
- postgresqlPassword: changeMePassword
- postgresqlPort: "5432"
- postgresqlUser: postgres
-redis:
- redisHost: redis-master.db.svc.cluster.local
- redisPort: "6379"
diff --git a/backend/cmd/ender/main.go b/backend/cmd/ender/main.go
index 913629f0e..beb69bd42 100644
--- a/backend/cmd/ender/main.go
+++ b/backend/cmd/ender/main.go
@@ -98,6 +98,8 @@ func main() {
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
log.Printf("can't commit messages with offset: %s", err)
}
+ case msg := <-consumer.Rebalanced():
+ log.Println(msg)
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
diff --git a/backend/cmd/heuristics/main.go b/backend/cmd/heuristics/main.go
index 9e4804089..0a5c77ea5 100644
--- a/backend/cmd/heuristics/main.go
+++ b/backend/cmd/heuristics/main.go
@@ -82,6 +82,8 @@ func main() {
})
producer.Flush(cfg.ProducerTimeout)
consumer.Commit()
+ case msg := <-consumer.Rebalanced():
+ log.Println(msg)
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
diff --git a/backend/cmd/sink/main.go b/backend/cmd/sink/main.go
index d3cc99e40..03f11b200 100644
--- a/backend/cmd/sink/main.go
+++ b/backend/cmd/sink/main.go
@@ -3,16 +3,14 @@ package main
import (
"context"
"log"
- "openreplay/backend/pkg/pprof"
"os"
"os/signal"
- "strings"
"syscall"
"time"
"openreplay/backend/internal/config/sink"
"openreplay/backend/internal/sink/assetscache"
- "openreplay/backend/internal/sink/oswriter"
+ "openreplay/backend/internal/sink/sessionwriter"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
@@ -21,8 +19,6 @@ import (
)
func main() {
- pprof.StartProfilingServer()
-
metrics := monitoring.New("sink")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
@@ -33,7 +29,7 @@ func main() {
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
}
- writer := oswriter.NewWriter(cfg.FsUlimit, cfg.FsDir)
+ writer := sessionwriter.NewWriter(cfg.FsUlimit, cfg.FsDir, cfg.FileBuffer, cfg.SyncTimeout)
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(cfg.ProducerCloseTimeout)
@@ -64,6 +60,7 @@ func main() {
if err := producer.Produce(cfg.TopicTrigger, msg.SessionID(), msg.Encode()); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, msg.SessionID())
}
+ writer.Close(msg.SessionID())
return
}
@@ -95,47 +92,20 @@ func main() {
counter.Update(msg.SessionID(), time.UnixMilli(ts))
}
- // Write encoded message with index to session file
- data := msg.EncodeWithIndex()
+ // Try to encode message to avoid null data inserts
+ data := msg.Encode()
if data == nil {
- log.Printf("can't encode with index, err: %s", err)
return
}
- wasWritten := false // To avoid timestamp duplicates in original mob file
- if messages.IsDOMType(msg.TypeID()) {
- if err := writer.WriteDOM(msg.SessionID(), data); err != nil {
- if strings.Contains(err.Error(), "not a directory") {
- // Trying to write data to mob file by original path
- oldErr := writer.WriteMOB(msg.SessionID(), data)
- if oldErr != nil {
- log.Printf("MOB Writeer error: %s, prev DOM error: %s, info: %s", oldErr, err, msg.Meta().Batch().Info())
- } else {
- wasWritten = true
- }
- } else {
- log.Printf("DOM Writer error: %s, info: %s", err, msg.Meta().Batch().Info())
- }
- }
- }
- if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
- // TODO: write only necessary timestamps
- if err := writer.WriteDEV(msg.SessionID(), data); err != nil {
- if strings.Contains(err.Error(), "not a directory") {
- if !wasWritten {
- // Trying to write data to mob file by original path
- oldErr := writer.WriteMOB(msg.SessionID(), data)
- if oldErr != nil {
- log.Printf("MOB Writeer error: %s, prev DEV error: %s, info: %s", oldErr, err, msg.Meta().Batch().Info())
- }
- }
- } else {
- log.Printf("Devtools Writer error: %s, info: %s", err, msg.Meta().Batch().Info())
- }
- }
+
+ // Write message to file
+ if err := writer.Write(msg); err != nil {
+ log.Printf("writer error: %s", err)
+ return
}
// [METRICS] Increase the number of written to the files messages and the message size
- messageSize.Record(context.Background(), float64(len(data)))
+ messageSize.Record(context.Background(), float64(len(msg.Encode())))
savedMessages.Add(context.Background(), 1)
}
@@ -153,27 +123,36 @@ func main() {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
- tick := time.Tick(30 * time.Second)
+ tick := time.Tick(10 * time.Second)
+ tickInfo := time.Tick(30 * time.Second)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
- if err := writer.CloseAll(); err != nil {
- log.Printf("closeAll error: %v\n", err)
- }
+ // Sync and stop writer
+ writer.Stop()
+ // Commit and stop consumer
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
consumer.Close()
os.Exit(0)
case <-tick:
- if err := writer.SyncAll(); err != nil {
- log.Fatalf("sync error: %v\n", err)
- }
- counter.Print()
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
+ case <-tickInfo:
+ counter.Print()
+ log.Printf("writer: %s", writer.Info())
+ case <-consumer.Rebalanced():
+ s := time.Now()
+ // Commit now to avoid duplicate reads
+ if err := consumer.Commit(); err != nil {
+ log.Printf("can't commit messages: %s", err)
+ }
+ // Sync all files
+ writer.Sync()
+ log.Printf("manual sync finished, dur: %d", time.Now().Sub(s).Milliseconds())
default:
err := consumer.ConsumeNext()
if err != nil {
diff --git a/backend/cmd/storage/main.go b/backend/cmd/storage/main.go
index 07c6eec91..251ce82e2 100644
--- a/backend/cmd/storage/main.go
+++ b/backend/cmd/storage/main.go
@@ -73,6 +73,8 @@ func main() {
os.Exit(0)
case <-counterTick:
go counter.Print()
+ case msg := <-consumer.Rebalanced():
+ log.Println(msg)
default:
err := consumer.ConsumeNext()
if err != nil {
diff --git a/backend/go.mod b/backend/go.mod
index 0eead389c..61d644a17 100644
--- a/backend/go.mod
+++ b/backend/go.mod
@@ -8,6 +8,7 @@ require (
github.com/Masterminds/semver v1.5.0
github.com/aws/aws-sdk-go v1.44.98
github.com/btcsuite/btcutil v1.0.2
+ github.com/confluentinc/confluent-kafka-go v1.8.2
github.com/elastic/go-elasticsearch/v7 v7.13.1
github.com/go-redis/redis v6.15.9+incompatible
github.com/google/uuid v1.3.0
@@ -26,9 +27,8 @@ require (
go.opentelemetry.io/otel/exporters/prometheus v0.30.0
go.opentelemetry.io/otel/metric v0.30.0
go.opentelemetry.io/otel/sdk/metric v0.30.0
- golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
+ golang.org/x/net v0.0.0-20220906165146-f3363e06e74c
google.golang.org/api v0.81.0
- gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2
)
require (
@@ -38,7 +38,6 @@ require (
cloud.google.com/go/storage v1.14.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/confluentinc/confluent-kafka-go v1.9.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -53,7 +52,6 @@ require (
github.com/jackc/puddle v1.2.2-0.20220404125616-4e959849469a // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.15.7 // indirect
- github.com/kr/pretty v0.3.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/paulmach/orb v0.7.1 // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
@@ -69,8 +67,8 @@ require (
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
- golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
- golang.org/x/text v0.3.7 // indirect
+ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
+ golang.org/x/text v0.4.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
diff --git a/backend/go.sum b/backend/go.sum
index dbaee7216..c7abea25e 100644
--- a/backend/go.sum
+++ b/backend/go.sum
@@ -115,12 +115,11 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/confluentinc/confluent-kafka-go v1.9.0 h1:d1k62oAuQVxgdMdiDQnpkABbtIWTBwXHpDcyGQUw5QQ=
-github.com/confluentinc/confluent-kafka-go v1.9.0/go.mod h1:WDFs+KlhHITEoCzEfHSNgj5aP7vjajyYbZpvTEGs1sE=
+github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E=
+github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -328,14 +327,12 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
@@ -400,8 +397,6 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -566,8 +561,9 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
+golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -678,8 +674,9 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -690,8 +687,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -939,8 +937,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2 h1:QAgN6OC0o7dwvyz+HML6GYm+0Pk54O91+oxGqJ/5z8I=
-gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
diff --git a/backend/internal/config/sink/config.go b/backend/internal/config/sink/config.go
index a7481f93a..53e3517a4 100644
--- a/backend/internal/config/sink/config.go
+++ b/backend/internal/config/sink/config.go
@@ -9,6 +9,8 @@ type Config struct {
common.Config
FsDir string `env:"FS_DIR,required"`
FsUlimit uint16 `env:"FS_ULIMIT,required"`
+ FileBuffer int `env:"FILE_BUFFER,default=16384"`
+ SyncTimeout int `env:"SYNC_TIMEOUT,default=5"`
GroupSink string `env:"GROUP_SINK,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
@@ -17,7 +19,7 @@ type Config struct {
CacheAssets bool `env:"CACHE_ASSETS,required"`
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
- CacheThreshold int64 `env:"CACHE_THRESHOLD,default=75"`
+ CacheThreshold int64 `env:"CACHE_THRESHOLD,default=5"`
CacheExpiration int64 `env:"CACHE_EXPIRATION,default=120"`
}
diff --git a/backend/internal/config/storage/config.go b/backend/internal/config/storage/config.go
index fdf29b7db..6083f0249 100644
--- a/backend/internal/config/storage/config.go
+++ b/backend/internal/config/storage/config.go
@@ -11,7 +11,6 @@ type Config struct {
S3Region string `env:"AWS_REGION_WEB,required"`
S3Bucket string `env:"S3_BUCKET_WEB,required"`
FSDir string `env:"FS_DIR,required"`
- FSCleanHRS int `env:"FS_CLEAN_HRS,required"`
FileSplitSize int `env:"FILE_SPLIT_SIZE,required"`
RetryTimeout time.Duration `env:"RETRY_TIMEOUT,default=2m"`
GroupStorage string `env:"GROUP_STORAGE,required"`
@@ -21,6 +20,7 @@ type Config struct {
DeleteTimeout time.Duration `env:"DELETE_TIMEOUT,default=48h"`
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
UseFailover bool `env:"USE_FAILOVER,default=false"`
+ MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"`
}
func New() *Config {
diff --git a/backend/internal/sink/oswriter/oswriter.go b/backend/internal/sink/oswriter/oswriter.go
deleted file mode 100644
index 070540b1d..000000000
--- a/backend/internal/sink/oswriter/oswriter.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package oswriter
-
-import (
- "errors"
- "log"
- "math"
- "os"
- "path/filepath"
- "strconv"
- "time"
-)
-
-type Writer struct {
- ulimit int
- dir string
- files map[string]*os.File
- atimes map[string]int64
-}
-
-func NewWriter(ulimit uint16, dir string) *Writer {
- return &Writer{
- ulimit: int(ulimit),
- dir: dir + "/",
- files: make(map[string]*os.File),
- atimes: make(map[string]int64),
- }
-}
-
-func (w *Writer) open(fname string) (*os.File, error) {
- file, ok := w.files[fname]
- if ok {
- return file, nil
- }
- if len(w.atimes) == w.ulimit {
- var m_k string
- var m_t int64 = math.MaxInt64
- for k, t := range w.atimes {
- if t < m_t {
- m_k = k
- m_t = t
- }
- }
- if err := w.close(m_k); err != nil {
- return nil, err
- }
- }
-
- // mkdir if not exist
- pathTo := w.dir + filepath.Dir(fname)
- if info, err := os.Stat(pathTo); os.IsNotExist(err) {
- if err := os.MkdirAll(pathTo, 0755); err != nil {
- log.Printf("os.MkdirAll error: %s", err)
- }
- } else {
- if err != nil {
- return nil, err
- }
- if !info.IsDir() {
- return nil, errors.New("not a directory")
- }
- }
-
- file, err := os.OpenFile(w.dir+fname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
- if err != nil {
- log.Printf("os.OpenFile error: %s", err)
- return nil, err
- }
- w.files[fname] = file
- w.atimes[fname] = time.Now().Unix()
- return file, nil
-}
-
-func (w *Writer) close(fname string) error {
- file := w.files[fname]
- if file == nil {
- return nil
- }
- if err := file.Sync(); err != nil {
- return err
- }
- if err := file.Close(); err != nil {
- return err
- }
- delete(w.files, fname)
- delete(w.atimes, fname)
- return nil
-}
-
-func (w *Writer) WriteDOM(sid uint64, data []byte) error {
- return w.write(strconv.FormatUint(sid, 10)+"/dom.mob", data)
-}
-
-func (w *Writer) WriteDEV(sid uint64, data []byte) error {
- return w.write(strconv.FormatUint(sid, 10)+"/devtools.mob", data)
-}
-
-func (w *Writer) WriteMOB(sid uint64, data []byte) error {
- // Use session id as a file name without directory
- fname := strconv.FormatUint(sid, 10)
- file, err := w.openWithoutDir(fname)
- if err != nil {
- return err
- }
- _, err = file.Write(data)
- return err
-}
-
-func (w *Writer) write(fname string, data []byte) error {
- file, err := w.open(fname)
- if err != nil {
- return err
- }
- _, err = file.Write(data)
- return err
-}
-
-func (w *Writer) openWithoutDir(fname string) (*os.File, error) {
- file, ok := w.files[fname]
- if ok {
- return file, nil
- }
- if len(w.atimes) == w.ulimit {
- var m_k string
- var m_t int64 = math.MaxInt64
- for k, t := range w.atimes {
- if t < m_t {
- m_k = k
- m_t = t
- }
- }
- if err := w.close(m_k); err != nil {
- return nil, err
- }
- }
-
- file, err := os.OpenFile(w.dir+fname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
- if err != nil {
- return nil, err
- }
- w.files[fname] = file
- w.atimes[fname] = time.Now().Unix()
- return file, nil
-}
-
-func (w *Writer) SyncAll() error {
- for _, file := range w.files {
- if err := file.Sync(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (w *Writer) CloseAll() error {
- for _, file := range w.files {
- if err := file.Sync(); err != nil {
- return err
- }
- if err := file.Close(); err != nil {
- return err
- }
- }
- w.files = nil
- w.atimes = nil
- return nil
-}
diff --git a/backend/internal/sink/sessionwriter/file.go b/backend/internal/sink/sessionwriter/file.go
new file mode 100644
index 000000000..37b1664a9
--- /dev/null
+++ b/backend/internal/sink/sessionwriter/file.go
@@ -0,0 +1,76 @@
+package sessionwriter
+
+import (
+ "bufio"
+ "io"
+ "log"
+ "os"
+)
+
+type File struct {
+ file *os.File
+ buffer *bufio.Writer
+ updated bool
+}
+
+func NewFile(path string, bufSize int) (*File, error) {
+ file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+ if err != nil {
+ return nil, err
+ }
+ return &File{
+ file: file,
+ buffer: bufio.NewWriterSize(file, bufSize),
+ updated: false,
+ }, nil
+}
+
+func (f *File) Write(data []byte) error {
+ f.updated = true
+ if len(data) > f.buffer.Available()+f.buffer.Size() {
+ // Flush buffer to file
+ for i := 0; i < 3; i++ {
+ err := f.buffer.Flush()
+ if err == nil {
+ break
+ }
+ log.Printf("can't flush buffer: %s", err)
+ }
+ // Write big message directly to file
+ return f.write(f.file, data)
+ }
+ return f.write(f.buffer, data)
+}
+
+func (f *File) write(w io.Writer, data []byte) error {
+ leftToWrite := len(data)
+ for leftToWrite > 0 {
+ from := len(data) - leftToWrite
+ writtenDown, err := w.Write(data[from:])
+ if err != nil {
+ return err
+ }
+ leftToWrite -= writtenDown
+ }
+ return nil
+}
+
+func (f *File) Sync() error {
+ if !f.updated {
+ return nil
+ }
+ if err := f.buffer.Flush(); err != nil {
+ return err
+ }
+ if err := f.file.Sync(); err != nil {
+ return err
+ }
+ f.updated = false
+ return nil
+}
+
+func (f *File) Close() error {
+ _ = f.buffer.Flush()
+ _ = f.file.Sync()
+ return f.file.Close()
+}
diff --git a/backend/internal/sink/sessionwriter/meta.go b/backend/internal/sink/sessionwriter/meta.go
new file mode 100644
index 000000000..4fac56e50
--- /dev/null
+++ b/backend/internal/sink/sessionwriter/meta.go
@@ -0,0 +1,56 @@
+package sessionwriter
+
+import (
+ "math"
+ "sync"
+ "time"
+)
+
+type Meta struct {
+ limit int
+ lock *sync.Mutex
+ meta map[uint64]int64
+}
+
+func NewMeta(limit int) *Meta {
+ return &Meta{
+ limit: limit,
+ lock: &sync.Mutex{},
+ meta: make(map[uint64]int64, limit),
+ }
+}
+
+func (m *Meta) Add(sid uint64) {
+ m.lock.Lock()
+ m.meta[sid] = time.Now().Unix()
+ m.lock.Unlock()
+}
+
+func (m *Meta) Count() int {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ return len(m.meta)
+}
+
+func (m *Meta) Delete(sid uint64) {
+ m.lock.Lock()
+ delete(m.meta, sid)
+ m.lock.Unlock()
+}
+
+func (m *Meta) GetExtra() uint64 {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ if len(m.meta) >= m.limit {
+ var extraSessID uint64
+ var minTimestamp int64 = math.MaxInt64
+ for sessID, timestamp := range m.meta {
+ if timestamp < minTimestamp {
+ extraSessID = sessID
+ minTimestamp = timestamp
+ }
+ }
+ return extraSessID
+ }
+ return 0
+}
diff --git a/backend/internal/sink/sessionwriter/session.go b/backend/internal/sink/sessionwriter/session.go
new file mode 100644
index 000000000..8cf8881de
--- /dev/null
+++ b/backend/internal/sink/sessionwriter/session.go
@@ -0,0 +1,96 @@
+package sessionwriter
+
+import (
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "sync"
+
+ "openreplay/backend/pkg/messages"
+)
+
+type Session struct {
+ lock *sync.Mutex
+ dom *File
+ dev *File
+ index []byte
+ updated bool
+}
+
+func NewSession(sessID uint64, workDir string, bufSize int) (*Session, error) {
+ if sessID == 0 {
+ return nil, fmt.Errorf("wrong session id")
+ }
+ filePath := workDir + strconv.FormatUint(sessID, 10)
+
+ dom, err := NewFile(filePath, bufSize)
+ if err != nil {
+ return nil, err
+ }
+ dev, err := NewFile(filePath+"devtools", bufSize)
+ if err != nil {
+ dom.Close()
+ return nil, err
+ }
+
+ return &Session{
+ lock: &sync.Mutex{},
+ dom: dom,
+ dev: dev,
+ index: make([]byte, 8),
+ updated: false,
+ }, nil
+}
+
+func (s *Session) Write(msg messages.Message) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // Encode message index
+ binary.LittleEndian.PutUint64(s.index, msg.Meta().Index)
+
+ // Write message to dom.mob file
+ if messages.IsDOMType(msg.TypeID()) {
+ // Write message index
+ if err := s.dom.Write(s.index); err != nil {
+ return err
+ }
+ // Write message body
+ if err := s.dom.Write(msg.Encode()); err != nil {
+ return err
+ }
+ }
+ s.updated = true
+ // Write message to dev.mob file
+ if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
+ // Write message index
+ if err := s.dev.Write(s.index); err != nil {
+ return err
+ }
+ // Write message body
+ if err := s.dev.Write(msg.Encode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *Session) Sync() error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if err := s.dom.Sync(); err != nil {
+ return err
+ }
+ return s.dev.Sync()
+}
+
+func (s *Session) Close() error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if err := s.dom.Close(); err != nil {
+ return err
+ }
+ return s.dev.Close()
+}
diff --git a/backend/internal/sink/sessionwriter/writer.go b/backend/internal/sink/sessionwriter/writer.go
new file mode 100644
index 000000000..7da1ae878
--- /dev/null
+++ b/backend/internal/sink/sessionwriter/writer.go
@@ -0,0 +1,126 @@
+package sessionwriter
+
+import (
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "openreplay/backend/pkg/messages"
+)
+
+type SessionWriter struct {
+	filesLimit  int           // max concurrently open sessions (half the fd budget: each session holds 2 files)
+	workingDir  string        // session-file directory, normalized with a trailing "/"
+	fileBuffer  int           // per-file write-buffer size
+	syncTimeout time.Duration // period of the background flush loop
+	meta        *Meta         // open-session bookkeeping; GetExtra() nominates a session to evict
+	sessions    *sync.Map     // sessID (uint64) -> *Session
+	done        chan struct{} // Stop() -> synchronizer shutdown signal
+	stopped     chan struct{} // synchronizer -> Stop() shutdown acknowledgement
+}
+
+func NewWriter(filesLimit uint16, workingDir string, fileBuffer int, syncTimeout int) *SessionWriter { // also starts the background sync goroutine
+	w := &SessionWriter{
+		filesLimit: int(filesLimit) / 2, // each session keeps 2 files open, so halve the descriptor budget
+		workingDir: workingDir + "/",
+		fileBuffer: fileBuffer,
+		syncTimeout: time.Duration(syncTimeout) * time.Second,
+		meta: NewMeta(int(filesLimit)), // NOTE(review): meta gets the full limit while filesLimit is halved — confirm intended
+		sessions: &sync.Map{},
+		done: make(chan struct{}),
+		stopped: make(chan struct{}),
+	}
+	go w.synchronizer() // periodic flush + shutdown handling
+	return w
+}
+
+func (w *SessionWriter) Write(msg messages.Message) (err error) { // route the message to its session, opening it on demand
+	var (
+		sess *Session
+		sid  = msg.SessionID()
+	)
+
+	// Load session
+	sessObj, ok := w.sessions.Load(sid)
+	if !ok {
+		// Create new session; NOTE(review): Load+Store is not atomic — concurrent writers for the same new sid could race (sync.Map.LoadOrStore would close the gap); confirm single-consumer usage
+		sess, err = NewSession(sid, w.workingDir, w.fileBuffer)
+		if err != nil {
+			return fmt.Errorf("can't create session: %d, err: %s", sid, err)
+		}
+
+		// Check the opened-sessions limit and evict one session if it is exceeded
+		if extraSessID := w.meta.GetExtra(); extraSessID != 0 {
+			if err := w.Close(extraSessID); err != nil {
+				log.Printf("can't close session: %s", err)
+			}
+		}
+
+		// Add created session
+		w.sessions.Store(sid, sess)
+		w.meta.Add(sid)
+	} else {
+		sess = sessObj.(*Session)
+	}
+
+	// Write data to session
+	return sess.Write(msg)
+}
+
+func (w *SessionWriter) sync(sid uint64) error { // flush a single session's buffers to disk
+	sessObj, ok := w.sessions.Load(sid)
+	if !ok {
+		return fmt.Errorf("session: %d not found", sid)
+	}
+	sess := sessObj.(*Session)
+	return sess.Sync()
+}
+
+func (w *SessionWriter) Close(sid uint64) error { // remove the session from the map and close its files
+	sessObj, ok := w.sessions.LoadAndDelete(sid)
+	if !ok {
+		return fmt.Errorf("session: %d not found", sid)
+	}
+	sess := sessObj.(*Session)
+	err := sess.Close()
+	w.meta.Delete(sid) // drop bookkeeping even if Close failed
+	return err
+}
+
+func (w *SessionWriter) Stop() { // blocks until the synchronizer has closed every open session
+	w.done <- struct{}{}
+	<-w.stopped
+}
+
+func (w *SessionWriter) Info() string { // short human-readable status line (open-session count)
+	return fmt.Sprintf("%d sessions", w.meta.Count())
+}
+
+func (w *SessionWriter) Sync() { // best-effort flush of every open session
+	w.sessions.Range(func(sid, lockObj any) bool {
+		if err := w.sync(sid.(uint64)); err != nil {
+			log.Printf("can't sync file descriptor: %s", err)
+		}
+		return true // keep iterating even after an error
+	})
+}
+
+func (w *SessionWriter) synchronizer() { // background loop: periodic Sync, full Close on shutdown
+	tick := time.Tick(w.syncTimeout) // ticker lives for the writer's lifetime (time.Tick is never stopped)
+	for {
+		select {
+		case <-tick:
+			w.Sync()
+		case <-w.done: // shutdown: close every remaining session, then acknowledge Stop()
+			w.sessions.Range(func(sid, lockObj any) bool {
+				if err := w.Close(sid.(uint64)); err != nil {
+					log.Printf("can't close file descriptor: %s", err)
+				}
+				return true
+			})
+			w.stopped <- struct{}{}
+			return
+		}
+	}
+}
diff --git a/backend/internal/storage/storage.go b/backend/internal/storage/storage.go
index 7fdc06c4f..12a37183f 100644
--- a/backend/internal/storage/storage.go
+++ b/backend/internal/storage/storage.go
@@ -13,7 +13,6 @@ import (
"openreplay/backend/pkg/storage"
"os"
"strconv"
- "strings"
"time"
)
@@ -71,43 +70,46 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
}
func (s *Storage) UploadSessionFiles(msg *messages.SessionEnd) error {
- sessionDir := strconv.FormatUint(msg.SessionID(), 10)
- if err := s.uploadKey(msg.SessionID(), sessionDir+"/dom.mob", true, 5, msg.EncryptionKey); err != nil {
- oldErr := s.uploadKey(msg.SessionID(), sessionDir, true, 5, msg.EncryptionKey)
- if oldErr != nil {
- return fmt.Errorf("upload file error: %s. failed checking mob file using old path: %s", err, oldErr)
- }
- // Exit method anyway because we don't have dev tools separation in prev version
- return nil
- }
- if err := s.uploadKey(msg.SessionID(), sessionDir+"/devtools.mob", false, 4, msg.EncryptionKey); err != nil {
+ if err := s.uploadKey(msg.SessionID(), "/dom.mob", true, 5, msg.EncryptionKey); err != nil {
return err
}
+ if err := s.uploadKey(msg.SessionID(), "/devtools.mob", false, 4, msg.EncryptionKey); err != nil {
+ log.Printf("can't find devtools for session: %d, err: %s", msg.SessionID(), err)
+ }
return nil
}
-// TODO: make a bit cleaner
-func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCount int, encryptionKey string) error {
+// TODO: refactor uploadKey — split the read / encrypt / upload phases into
+// separate helpers so the retry logic is easier to follow.
+func (s *Storage) uploadKey(sessID uint64, suffix string, shouldSplit bool, retryCount int, encryptionKey string) error {
if retryCount <= 0 {
return nil
}
-
start := time.Now()
- file, err := os.Open(s.cfg.FSDir + "/" + key)
+ fileName := strconv.FormatUint(sessID, 10)
+ mobFileName := fileName
+ if suffix == "/devtools.mob" {
+ mobFileName += "devtools"
+ }
+ filePath := s.cfg.FSDir + "/" + mobFileName
+
+	// Check the file size first: skip files larger than MaxFileSize instead of reading them into memory
+ info, err := os.Stat(filePath)
+ if err == nil {
+ if info.Size() > s.cfg.MaxFileSize {
+ log.Printf("big file, size: %d, session: %d", info.Size(), sessID)
+ return nil
+ }
+ }
+ file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("File open error: %v; sessID: %s, part: %d, sessStart: %s\n",
- err, key, sessID%16,
+ err, fileName, sessID%16,
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
)
}
defer file.Close()
- // Ignore "s" at the end of mob file name for "old" sessions
- newVers := false
- if strings.Contains(key, "/") {
- newVers = true
- }
-
var fileSize int64 = 0
fileInfo, err := file.Stat()
if err != nil {
@@ -117,17 +119,18 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
}
var encryptedData []byte
+ fileName += suffix
if shouldSplit {
nRead, err := file.Read(s.startBytes)
if err != nil {
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
err,
- key,
+ fileName,
sessID%16,
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
)
time.AfterFunc(s.cfg.RetryTimeout, func() {
- s.uploadKey(sessID, key, shouldSplit, retryCount-1, encryptionKey)
+ s.uploadKey(sessID, suffix, shouldSplit, retryCount-1, encryptionKey)
})
return nil
}
@@ -146,11 +149,7 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
}
// Compress and save to s3
startReader := bytes.NewBuffer(encryptedData)
- startKey := key
- if newVers {
- startKey += "s"
- }
- if err := s.s3.Upload(s.gzipFile(startReader), startKey, "application/octet-stream", true); err != nil {
+ if err := s.s3.Upload(s.gzipFile(startReader), fileName+"s", "application/octet-stream", true); err != nil {
log.Fatalf("Storage: start upload failed. %v\n", err)
}
// TODO: fix possible error (if we read less then FileSplitSize)
@@ -161,7 +160,7 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
if err != nil {
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
err,
- key,
+ fileName,
sessID%16,
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
)
@@ -183,7 +182,7 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
}
// Compress and save to s3
endReader := bytes.NewBuffer(encryptedData)
- if err := s.s3.Upload(s.gzipFile(endReader), key+"e", "application/octet-stream", true); err != nil {
+ if err := s.s3.Upload(s.gzipFile(endReader), fileName+"e", "application/octet-stream", true); err != nil {
log.Fatalf("Storage: end upload failed. %v\n", err)
}
}
@@ -195,7 +194,7 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
if err != nil {
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
err,
- key,
+ fileName,
sessID%16,
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
)
@@ -216,7 +215,7 @@ func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCo
encryptedData = fileData
}
endReader := bytes.NewBuffer(encryptedData)
- if err := s.s3.Upload(s.gzipFile(endReader), key+"s", "application/octet-stream", true); err != nil {
+ if err := s.s3.Upload(s.gzipFile(endReader), fileName, "application/octet-stream", true); err != nil {
log.Fatalf("Storage: end upload failed. %v\n", err)
}
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
diff --git a/backend/pkg/db/types/error-event.go b/backend/pkg/db/types/error-event.go
index 826cbba9e..bef9abd99 100644
--- a/backend/pkg/db/types/error-event.go
+++ b/backend/pkg/db/types/error-event.go
@@ -11,6 +11,8 @@ import (
. "openreplay/backend/pkg/messages"
)
+const SOURCE_JS = "js_exception" // error-source tag set by WrapJSException; ErrorEvent.ID only parses stack frames for this source
+
type ErrorEvent struct {
MessageID uint64
Timestamp uint64
@@ -64,7 +66,7 @@ func WrapJSException(m *JSException) *ErrorEvent {
return &ErrorEvent{
MessageID: m.Meta().Index,
Timestamp: uint64(m.Meta().Timestamp),
- Source: "js_exception",
+ Source: SOURCE_JS,
Name: m.Name,
Message: m.Message,
Payload: m.Payload,
@@ -105,14 +107,16 @@ func (e *ErrorEvent) ID(projectID uint32) string {
hash.Write([]byte(e.Source))
hash.Write([]byte(e.Name))
hash.Write([]byte(e.Message))
- frame, err := parseFirstFrame(e.Payload)
- if err != nil {
- log.Printf("Can't parse stackframe ((( %v ))): %v", e.Payload, err)
- }
- if frame != nil {
- hash.Write([]byte(frame.FileName))
- hash.Write([]byte(strconv.Itoa(frame.LineNo)))
- hash.Write([]byte(strconv.Itoa(frame.ColNo)))
+ if e.Source == SOURCE_JS {
+ frame, err := parseFirstFrame(e.Payload)
+ if err != nil {
+ log.Printf("Can't parse stackframe ((( %v ))): %v", e.Payload, err)
+ }
+ if frame != nil {
+ hash.Write([]byte(frame.FileName))
+ hash.Write([]byte(strconv.Itoa(frame.LineNo)))
+ hash.Write([]byte(strconv.Itoa(frame.ColNo)))
+ }
}
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
}
diff --git a/backend/pkg/messages/iterator.go b/backend/pkg/messages/iterator.go
index 8b23cb97e..7b7991b19 100644
--- a/backend/pkg/messages/iterator.go
+++ b/backend/pkg/messages/iterator.go
@@ -100,6 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
tp: msgType,
size: i.size,
reader: reader,
+ raw: batchData,
skipped: &i.canSkip,
broken: &i.broken,
meta: i.messageInfo,
diff --git a/backend/pkg/messages/primitives.go b/backend/pkg/messages/primitives.go
index eb65ae7b1..1d3d2410b 100644
--- a/backend/pkg/messages/primitives.go
+++ b/backend/pkg/messages/primitives.go
@@ -8,13 +8,17 @@ import (
"log"
)
+var (
+	one   = []byte{0}       // shared scratch buffer for ReadByte; NOTE(review): package-level mutable buffers make these readers unsafe for concurrent use — confirm decoding is single-goroutine
+	three = []byte{0, 0, 0} // shared scratch buffer for ReadSize (same concurrency caveat)
+)
+
func ReadByte(reader io.Reader) (byte, error) {
-	p := make([]byte, 1)
-	_, err := io.ReadFull(reader, p)
+	_, err := io.ReadFull(reader, one) // read into the shared package-level buffer to avoid a per-call allocation
	if err != nil {
		return 0, err
	}
-	return p[0], nil
+	return one[0], nil
}
func ReadData(reader io.Reader) ([]byte, error) {
@@ -156,8 +160,7 @@ func WriteSize(size uint64, buf []byte, p int) {
}
func ReadSize(reader io.Reader) (uint64, error) {
- buf := make([]byte, 3)
- n, err := io.ReadFull(reader, buf)
+ n, err := io.ReadFull(reader, three)
if err != nil {
return 0, err
}
@@ -165,7 +168,7 @@ func ReadSize(reader io.Reader) (uint64, error) {
return 0, fmt.Errorf("read only %d of 3 size bytes", n)
}
var size uint64
- for i, b := range buf {
+ for i, b := range three {
size += uint64(b) << (8 * i)
}
return size, nil
diff --git a/backend/pkg/messages/raw.go b/backend/pkg/messages/raw.go
index 33419d115..dbc71f4e6 100644
--- a/backend/pkg/messages/raw.go
+++ b/backend/pkg/messages/raw.go
@@ -13,6 +13,7 @@ type RawMessage struct {
size uint64
data []byte
reader *bytes.Reader
+ raw []byte
meta *message
encoded bool
skipped *bool
@@ -23,15 +24,25 @@ func (m *RawMessage) Encode() []byte {
if m.encoded {
return m.data
}
- m.data = make([]byte, m.size+1)
- m.data[0] = uint8(m.tp)
- m.encoded = true
- *m.skipped = false
- _, err := io.ReadFull(m.reader, m.data[1:])
- if err != nil {
- log.Printf("message encode err: %s, type: %d, sess: %d", err, m.tp, m.SessionID())
+	// Not enough bytes left in the batch for the declared size — bail out instead of hitting EOF mid-read
+ if m.reader.Len() < int(m.size) {
return nil
}
+ // Get current batch position
+ currPos, err := m.reader.Seek(0, io.SeekCurrent)
+ if err != nil {
+ log.Printf("can't get current batch position: %s", err)
+ return nil
+ }
+	// The type byte is written just before the payload; at position 0 there is no room for it
+ if currPos == 0 {
+ log.Printf("can't move message type, curr position = %d", currPos)
+ return nil
+ }
+	// Reuse the raw batch buffer in place (stamp the type byte just before the payload) to avoid copying the message
+ m.raw[currPos-1] = uint8(m.tp)
+ m.data = m.raw[currPos-1 : currPos+int64(m.size)]
+ m.encoded = true
return m.data
}
diff --git a/backend/pkg/queue/types/types.go b/backend/pkg/queue/types/types.go
index 48408ce10..21ee49d60 100644
--- a/backend/pkg/queue/types/types.go
+++ b/backend/pkg/queue/types/types.go
@@ -6,6 +6,7 @@ type Consumer interface {
CommitBack(gap int64) error
Commit() error
Close()
+ Rebalanced() <-chan interface{}
}
// Producer sends batches of session data to queue (redis or kafka)
diff --git a/backend/pkg/redisstream/consumer.go b/backend/pkg/redisstream/consumer.go
index 228b2c7a0..3c5b6d0a4 100644
--- a/backend/pkg/redisstream/consumer.go
+++ b/backend/pkg/redisstream/consumer.go
@@ -27,6 +27,7 @@ type Consumer struct {
idsPending streamPendingIDsMap
lastTs int64
autoCommit bool
+ event chan interface{}
}
func NewConsumer(group string, streams []string, messageIterator messages.MessageIterator) *Consumer {
@@ -57,11 +58,16 @@ func NewConsumer(group string, streams []string, messageIterator messages.Messag
group: group,
autoCommit: true,
idsPending: idsPending,
+ event: make(chan interface{}, 4),
}
}
const READ_COUNT = 10
+func (c *Consumer) Rebalanced() <-chan interface{} {
+ return c.event
+}
+
func (c *Consumer) ConsumeNext() error {
// MBTODO: read in go routine, send messages to channel
res, err := c.redis.XReadGroup(&_redis.XReadGroupArgs{
diff --git a/backend/pkg/redisstream/redis.go b/backend/pkg/redisstream/redis.go
index 7dba0b537..434099879 100644
--- a/backend/pkg/redisstream/redis.go
+++ b/backend/pkg/redisstream/redis.go
@@ -2,6 +2,7 @@ package redisstream
import (
"log"
+ "regexp"
"github.com/go-redis/redis"
@@ -14,9 +15,20 @@ func getRedisClient() *redis.Client {
if redisClient != nil {
return redisClient
}
- redisClient = redis.NewClient(&redis.Options{
- Addr: env.String("REDIS_STRING"),
- })
+
+ connectionString := env.String("REDIS_STRING")
+
+ match, _ := regexp.MatchString("^[^:]+://", connectionString)
+ if !match {
+ connectionString = "redis://" + connectionString
+ }
+
+ options, err := redis.ParseURL(connectionString)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ redisClient = redis.NewClient(options)
if _, err := redisClient.Ping().Result(); err != nil {
log.Fatalln(err)
}
diff --git a/ee/api/chalicelib/core/alerts_processor.py b/ee/api/chalicelib/core/alerts_processor.py
index 087f23a05..326d17ffc 100644
--- a/ee/api/chalicelib/core/alerts_processor.py
+++ b/ee/api/chalicelib/core/alerts_processor.py
@@ -204,7 +204,8 @@ def process():
logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
notifications.append(generate_notification(alert, result))
except Exception as e:
- logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
+ logging.error(
+ f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
logging.error(query)
logging.error(e)
cur = cur.recreate(rollback=True)
@@ -217,12 +218,22 @@ def process():
alerts.process_notifications(notifications)
+def __format_value(x):  # format a number for alert text: integers without ".0", floats to 2 decimals, thousands separators
+    if x % 1 == 0:  # integral value (handles both int and float inputs like 3.0)
+        x = int(x)
+    else:
+        x = round(x, 2)
+    return f"{x:,}"
+
+
def generate_notification(alert, result):
+ left = __format_value(result['value'])
+ right = __format_value(alert['query']['right'])
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
- "description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
+ "description": f"has been triggered, {alert['query']['left']} = {left} ({alert['query']['operator']} {right}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
diff --git a/ee/api/chalicelib/core/custom_metrics.py b/ee/api/chalicelib/core/custom_metrics.py
index b925429f6..3fef819b3 100644
--- a/ee/api/chalicelib/core/custom_metrics.py
+++ b/ee/api/chalicelib/core/custom_metrics.py
@@ -279,7 +279,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
"metric_type": data.metric_type, "metric_of": data.metric_of,
- "metric_value": data.metric_value, "metric_format": data.metric_format}
+ "metric_value": data.metric_value, "metric_format": data.metric_format,
+ "config": json.dumps(data.config.dict())}
for i, s in enumerate(data.series):
prefix = "u_"
if s.index is None:
@@ -329,7 +330,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
view_type= %(view_type)s, metric_type= %(metric_type)s,
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
metric_format= %(metric_format)s,
- edited_at = timezone('utc'::text, now())
+ edited_at = timezone('utc'::text, now()),
+ default_config = %(config)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
@@ -405,7 +407,7 @@ def get(metric_id, project_id, user_id, flatten=True):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
- """SELECT *
+ """SELECT *, default_config AS config
FROM metrics
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
@@ -456,7 +458,7 @@ def get_with_template(metric_id, project_id, user_id, include_dashboard=True):
) AS connected_dashboards ON (TRUE)"""
cur.execute(
cur.mogrify(
- f"""SELECT *
+ f"""SELECT *, default_config AS config
FROM metrics
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
diff --git a/ee/api/chalicelib/core/dashboards.py b/ee/api/chalicelib/core/dashboards.py
index d96356df1..25b1551d3 100644
--- a/ee/api/chalicelib/core/dashboards.py
+++ b/ee/api/chalicelib/core/dashboards.py
@@ -118,6 +118,8 @@ def get_dashboard(project_id, user_id, dashboard_id):
for w in row["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
+ w["config"]["col"] = w["default_config"]["col"]
+ w["config"]["row"] = w["default_config"]["row"]
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
return helper.dict_to_camel_case(row)
diff --git a/ee/api/chalicelib/core/metrics.py b/ee/api/chalicelib/core/metrics.py
index 452566194..2a2f6ee20 100644
--- a/ee/api/chalicelib/core/metrics.py
+++ b/ee/api/chalicelib/core/metrics.py
@@ -452,18 +452,18 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
ch_sub_query.append("resources.type = 'img'")
ch_sub_query_chart = __get_basic_constraints(table_name="resources", round_start=True, data=args)
ch_sub_query_chart.append("resources.type = 'img'")
- ch_sub_query_chart.append("resources.url IN %(url)s")
+ ch_sub_query_chart.append("resources.url_hostpath IN %(url)s")
meta_condition = __get_meta_constraint(args)
ch_sub_query += meta_condition
ch_sub_query_chart += meta_condition
with ch_client.ClickHouseClient() as ch:
- ch_query = f"""SELECT resources.url,
+ ch_query = f"""SELECT resources.url_hostpath AS url,
COALESCE(avgOrNull(resources.duration),0) AS avg,
COUNT(1) AS count
FROM resources {"INNER JOIN sessions_metadata USING(session_id)" if len(meta_condition) > 0 else ""}
WHERE {" AND ".join(ch_sub_query)} AND resources.duration>0
- GROUP BY resources.url ORDER BY avg DESC LIMIT 10;"""
+ GROUP BY resources.url_hostpath ORDER BY avg DESC LIMIT 10;"""
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
rows = ch.execute(query=ch_query, params=params)
@@ -474,13 +474,13 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
urls = [row["url"] for row in rows]
charts = {}
- ch_query = f"""SELECT url,
+ ch_query = f"""SELECT url_hostpath AS url,
toUnixTimestamp(toStartOfInterval(resources.datetime, INTERVAL %(step_size)s second ))*1000 AS timestamp,
COALESCE(avgOrNull(resources.duration),0) AS avg
FROM resources {"INNER JOIN sessions_metadata USING(session_id)" if len(meta_condition) > 0 else ""}
WHERE {" AND ".join(ch_sub_query_chart)} AND resources.duration>0
- GROUP BY url, timestamp
- ORDER BY url, timestamp;"""
+ GROUP BY url_hostpath, timestamp
+ ORDER BY url_hostpath, timestamp;"""
params["url"] = urls
u_rows = ch.execute(query=ch_query, params=params)
for url in urls:
@@ -526,13 +526,13 @@ def get_performance(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTi
if resources and len(resources) > 0:
for r in resources:
if r["type"] == "IMG":
- img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
+ img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
elif r["type"] == "LOCATION":
location_constraints.append(f"pages.url_path = %(val_{len(location_constraints)})s")
location_constraints_vals["val_" + str(len(location_constraints) - 1)] = r['value']
else:
- request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
+ request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp}
@@ -638,7 +638,7 @@ def search(text, resource_type, project_id, performance=False, pages_only=False,
if resource_type == "ALL" and not pages_only and not events_only:
ch_sub_query.append("positionUTF8(url_hostpath,%(value)s)!=0")
with ch_client.ClickHouseClient() as ch:
- ch_query = f"""SELECT arrayJoin(arraySlice(arrayReverseSort(arrayDistinct(groupArray(url))), 1, 5)) AS value,
+ ch_query = f"""SELECT arrayJoin(arraySlice(arrayReverseSort(arrayDistinct(groupArray(url_hostpath))), 1, 5)) AS value,
type AS key
FROM resources
WHERE {" AND ".join(ch_sub_query)}
@@ -884,7 +884,7 @@ def get_resources_loading_time(project_id, startTimestamp=TimeUTC.now(delta_days
if type is not None:
ch_sub_query_chart.append(f"resources.type = '{__get_resource_db_type_from_type(type)}'")
if url is not None:
- ch_sub_query_chart.append(f"resources.url = %(value)s")
+ ch_sub_query_chart.append(f"resources.url_hostpath = %(value)s")
meta_condition = __get_meta_constraint(args)
ch_sub_query_chart += meta_condition
ch_sub_query_chart.append("resources.duration>0")
@@ -966,7 +966,7 @@ def get_slowest_resources(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
ch_sub_query_chart.append("isNotNull(resources.duration)")
ch_sub_query_chart.append("resources.duration>0")
with ch_client.ClickHouseClient() as ch:
- ch_query = f"""SELECT any(url) AS url, any(type) AS type,
+ ch_query = f"""SELECT any(url_hostpath) AS url, any(type) AS type,
splitByChar('/', resources.url_hostpath)[-1] AS name,
COALESCE(avgOrNull(NULLIF(resources.duration,0)),0) AS avg
FROM resources {"INNER JOIN sessions_metadata USING(session_id)" if len(meta_condition) > 0 else ""}
@@ -2179,7 +2179,7 @@ def get_performance_avg_image_load_time(ch, project_id, startTimestamp=TimeUTC.n
if resources and len(resources) > 0:
for r in resources:
if r["type"] == "IMG":
- img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
+ img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
@@ -2254,7 +2254,7 @@ def get_performance_avg_request_load_time(ch, project_id, startTimestamp=TimeUTC
if resources and len(resources) > 0:
for r in resources:
if r["type"] != "IMG" and r["type"] == "LOCATION":
- request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
+ request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp}
diff --git a/ee/api/chalicelib/core/metrics_exp.py b/ee/api/chalicelib/core/metrics_exp.py
index 9a8af012b..c41676d4a 100644
--- a/ee/api/chalicelib/core/metrics_exp.py
+++ b/ee/api/chalicelib/core/metrics_exp.py
@@ -462,18 +462,18 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
ch_sub_query_chart = __get_basic_constraints(table_name="resources", round_start=True, data=args)
# ch_sub_query_chart.append("events.event_type='RESOURCE'")
ch_sub_query_chart.append("resources.type = 'img'")
- ch_sub_query_chart.append("resources.url IN %(url)s")
+ ch_sub_query_chart.append("resources.url_hostpath IN %(url)s")
meta_condition = __get_meta_constraint(args)
ch_sub_query += meta_condition
ch_sub_query_chart += meta_condition
with ch_client.ClickHouseClient() as ch:
- ch_query = f"""SELECT resources.url,
+ ch_query = f"""SELECT resources.url_hostpath AS url,
COALESCE(avgOrNull(resources.duration),0) AS avg,
COUNT(1) AS count
FROM {exp_ch_helper.get_main_resources_table(startTimestamp)} AS resources
WHERE {" AND ".join(ch_sub_query)} AND resources.duration>0
- GROUP BY resources.url ORDER BY avg DESC LIMIT 10;"""
+ GROUP BY resources.url_hostpath ORDER BY avg DESC LIMIT 10;"""
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
rows = ch.execute(query=ch_query, params=params)
@@ -484,13 +484,13 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
urls = [row["url"] for row in rows]
charts = {}
- ch_query = f"""SELECT url,
+ ch_query = f"""SELECT url_hostpath AS url,
toUnixTimestamp(toStartOfInterval(resources.datetime, INTERVAL %(step_size)s second ))*1000 AS timestamp,
COALESCE(avgOrNull(resources.duration),0) AS avg
FROM {exp_ch_helper.get_main_resources_table(startTimestamp)} AS resources
WHERE {" AND ".join(ch_sub_query_chart)} AND resources.duration>0
- GROUP BY url, timestamp
- ORDER BY url, timestamp;"""
+ GROUP BY url_hostpath, timestamp
+ ORDER BY url_hostpath, timestamp;"""
params["url"] = urls
# print(ch.format(query=ch_query, params=params))
u_rows = ch.execute(query=ch_query, params=params)
@@ -538,13 +538,13 @@ def get_performance(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTi
if resources and len(resources) > 0:
for r in resources:
if r["type"] == "IMG":
- img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
+ img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
elif r["type"] == "LOCATION":
location_constraints.append(f"pages.url_path = %(val_{len(location_constraints)})s")
location_constraints_vals["val_" + str(len(location_constraints) - 1)] = r['value']
else:
- request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
+ request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp}
@@ -891,7 +891,7 @@ def get_resources_loading_time(project_id, startTimestamp=TimeUTC.now(delta_days
if type is not None:
ch_sub_query_chart.append(f"resources.type = '{__get_resource_db_type_from_type(type)}'")
if url is not None:
- ch_sub_query_chart.append(f"resources.url = %(value)s")
+ ch_sub_query_chart.append(f"resources.url_hostpath = %(value)s")
meta_condition = __get_meta_constraint(args)
ch_sub_query_chart += meta_condition
ch_sub_query_chart.append("resources.duration>0")
@@ -974,7 +974,7 @@ def get_slowest_resources(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
ch_sub_query_chart.append("isNotNull(resources.duration)")
ch_sub_query_chart.append("resources.duration>0")
with ch_client.ClickHouseClient() as ch:
- ch_query = f"""SELECT any(url) AS url, any(type) AS type, name,
+ ch_query = f"""SELECT any(url_hostpath) AS url, any(type) AS type, name,
COALESCE(avgOrNull(NULLIF(resources.duration,0)),0) AS avg
FROM {exp_ch_helper.get_main_resources_table(startTimestamp)} AS resources
WHERE {" AND ".join(ch_sub_query)}
@@ -2185,7 +2185,7 @@ def get_performance_avg_image_load_time(ch, project_id, startTimestamp=TimeUTC.n
if resources and len(resources) > 0:
for r in resources:
if r["type"] == "IMG":
- img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
+ img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
@@ -2260,7 +2260,7 @@ def get_performance_avg_request_load_time(ch, project_id, startTimestamp=TimeUTC
if resources and len(resources) > 0:
for r in resources:
if r["type"] != "IMG" and r["type"] == "LOCATION":
- request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
+ request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp}
diff --git a/ee/api/chalicelib/core/projects.py b/ee/api/chalicelib/core/projects.py
index cf18cc0a9..18d71914b 100644
--- a/ee/api/chalicelib/core/projects.py
+++ b/ee/api/chalicelib/core/projects.py
@@ -51,6 +51,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
AND users.deleted_at ISNULL
AND users.tenant_id = %(tenant_id)s
AND (roles.all_projects OR roles_projects.project_id = s.project_id)
+ LIMIT 1
) AS role_project ON (TRUE)"""
extra_projection = ""
extra_join = ""
@@ -86,22 +87,23 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
{"tenant_id": tenant_id, "user_id": user_id, "now": TimeUTC.now()})
cur.execute(query)
rows = cur.fetchall()
-
# if recorded is requested, check if it was saved or computed
if recorded:
- for r in rows:
+ u_values = []
+ params = {}
+ for i, r in enumerate(rows):
if r["first_recorded_session_at"] is None:
- extra_update = ""
- if r["recorded"]:
- extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
- query = cur.mogrify(f"""UPDATE public.projects
- SET sessions_last_check_at=(now() at time zone 'utc')
- {extra_update}
- WHERE project_id=%(project_id)s""",
- {"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
- cur.execute(query)
+ u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
+ params[f"project_id_{i}"] = r["project_id"]
+ params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
r.pop("first_recorded_session_at")
r.pop("first_recorded")
+ if len(u_values) > 0:
+ query = cur.mogrify(f"""UPDATE public.projects
+ SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
+ FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
+ WHERE projects.project_id=u.project_id;""", params)
+ cur.execute(query)
if recording_state and len(rows) > 0:
project_ids = [f'({r["project_id"]})' for r in rows]
@@ -111,6 +113,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
WHERE sessions.start_ts >= %(startDate)s AND sessions.start_ts <= %(endDate)s
GROUP BY project_id;""",
{"startDate": TimeUTC.now(delta_days=-3), "endDate": TimeUTC.now(delta_days=1)})
+
cur.execute(query=query)
status = cur.fetchall()
for r in rows:
diff --git a/ee/api/chalicelib/core/sessions.py b/ee/api/chalicelib/core/sessions.py
index 8c9eaf006..7d999fe6c 100644
--- a/ee/api/chalicelib/core/sessions.py
+++ b/ee/api/chalicelib/core/sessions.py
@@ -107,8 +107,7 @@ def get_by_id2_pg(project_id, session_id, context: schemas_ee.CurrentContext, fu
session_id=session_id, user_id=context.user_id)
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
- data['live'] = live and assist.is_live(project_id=project_id,
- session_id=session_id,
+ data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
project_key=data["projectKey"])
data["inDB"] = True
return data
@@ -181,7 +180,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):
# This function executes the query and return result
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
- error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
+ error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):
if data.bookmarked:
data.startDate, data.endDate = sessions_favorite.get_start_end_timestamp(project_id, user_id)
@@ -189,9 +188,11 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
favorite_only=data.bookmarked, issue=issue, project_id=project_id,
user_id=user_id)
if data.limit is not None and data.page is not None:
+ full_args["sessions_limit"] = data.limit
full_args["sessions_limit_s"] = (data.page - 1) * data.limit
full_args["sessions_limit_e"] = data.page * data.limit
else:
+ full_args["sessions_limit"] = 200
full_args["sessions_limit_s"] = 1
full_args["sessions_limit_e"] = 200
@@ -239,6 +240,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
GROUP BY user_id
) AS users_sessions;""",
full_args)
+ elif ids_only:
+ main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
+ {query_part}
+ ORDER BY s.session_id desc
+ LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
+ full_args)
else:
if data.order is None:
data.order = schemas.SortOrderType.desc
@@ -246,7 +253,6 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
sort = helper.key_to_snake_case(data.sort)
-
meta_keys = metadata.get(project_id=project_id)
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
COALESCE(JSONB_AGG(full_sessions)
@@ -270,7 +276,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
print(data.json())
print("--------------------")
raise err
- if errors_only:
+ if errors_only or ids_only:
return helper.list_to_camel_case(cur.fetchall())
sessions = cur.fetchone()
diff --git a/ee/api/chalicelib/core/sessions_notes.py b/ee/api/chalicelib/core/sessions_notes.py
index 746eba4c8..de1f83854 100644
--- a/ee/api/chalicelib/core/sessions_notes.py
+++ b/ee/api/chalicelib/core/sessions_notes.py
@@ -144,7 +144,9 @@ def share_to_slack(tenant_id, user_id, project_id, note_id, webhook_id):
note = get_note(tenant_id=tenant_id, project_id=project_id, user_id=user_id, note_id=note_id, share=user_id)
if note is None:
return {"errors": ["Note not found"]}
- session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}")
+ session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}?note={note['noteId']}")
+ if note["timestamp"] > 0:
+ session_url += f"&jumpto={note['timestamp']}"
title = f"<{session_url}|Note for session {note['sessionId']}>"
blocks = [{"type": "section",
diff --git a/ee/api/chalicelib/core/significance.py b/ee/api/chalicelib/core/significance.py
index 59f773c9e..e3d6cc735 100644
--- a/ee/api/chalicelib/core/significance.py
+++ b/ee/api/chalicelib/core/significance.py
@@ -188,9 +188,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
values=s["value"], value_key=f"value{i + 1}")
n_stages_query.append(f"""
(SELECT main.session_id,
- {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp,
- '{event_type}' AS type,
- '{s["operator"]}' AS operator
+ {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
FROM {next_table} AS main {" ".join(extra_from)}
WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
{f"AND main.session_id=T1.session_id" if i > 0 else ""}
@@ -198,45 +196,55 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
{(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
{(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
GROUP BY main.session_id)
- AS T{i + 1} {"USING (session_id)" if i > 0 else ""}
+ AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
- if len(n_stages_query) == 0:
+ n_stages = len(n_stages_query)
+ if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
n_stages_query += ") AS stages_t"
n_stages_query = f"""
- SELECT stages_and_issues_t.*,sessions.session_id, sessions.user_uuid FROM (
+ SELECT stages_and_issues_t.*, sessions.user_uuid
+ FROM (
SELECT * FROM (
- SELECT * FROM
- {n_stages_query}
+ SELECT T1.session_id, {",".join([f"stage{i + 1}_timestamp" for i in range(n_stages)])}
+ FROM {n_stages_query}
LEFT JOIN LATERAL
- (
- SELECT * FROM
- (SELECT ISE.session_id,
- ISS.type as issue_type,
+ ( SELECT ISS.type as issue_type,
ISE.timestamp AS issue_timestamp,
- ISS.context_string as issue_context,
+ COALESCE(ISS.context_string,'') as issue_context,
ISS.issue_id as issue_id
FROM events_common.issues AS ISE INNER JOIN issues AS ISS USING (issue_id)
WHERE ISE.timestamp >= stages_t.stage1_timestamp
AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
AND ISS.project_id=%(project_id)s
- {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}) AS base_t
- ) AS issues_t
- USING (session_id)) AS stages_and_issues_t
- inner join sessions USING(session_id);
+ AND ISE.session_id = stages_t.session_id
+ AND ISS.type!='custom' -- ignore custom issues because they are massive
+ {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
+ LIMIT 10 -- remove the limit to get exact stats
+ ) AS issues_t ON (TRUE)
+ ) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
# LIMIT 10000
params = {"project_id": project_id, "startTimestamp": filter_d["startDate"], "endTimestamp": filter_d["endDate"],
"issueTypes": tuple(filter_issues), **values}
with pg_client.PostgresClient() as cur:
+ query = cur.mogrify(n_stages_query, params)
# print("---------------------------------------------------")
- # print(cur.mogrify(n_stages_query, params))
+ # print(query)
# print("---------------------------------------------------")
- cur.execute(cur.mogrify(n_stages_query, params))
- rows = cur.fetchall()
+ try:
+ cur.execute(query)
+ rows = cur.fetchall()
+ except Exception as err:
+ print("--------- FUNNEL SEARCH QUERY EXCEPTION -----------")
+ print(query.decode('UTF-8'))
+ print("--------- PAYLOAD -----------")
+ print(filter_d)
+ print("--------------------")
+ raise err
return rows
@@ -298,7 +306,21 @@ def pearson_corr(x: list, y: list):
return r, confidence, False
-def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_with_context, first_stage, last_stage):
+# def tuple_or(t: tuple):
+# x = 0
+# for el in t:
+# x |= el # | is for bitwise OR
+# return x
+#
+# The following function is a correct optimization of the previous one, because t is a list of 0s and 1s
+def tuple_or(t: tuple):
+ for el in t:
+ if el > 0:
+ return 1
+ return 0
+
+
+def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
@@ -317,12 +339,6 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
transitions = []
n_sess_affected = 0
errors = {}
- for issue in all_issues_with_context:
- split = issue.split('__^__')
- errors[issue] = {
- "errors": [],
- "issue_type": split[0],
- "context": split[1]}
for row in rows:
t = 0
@@ -330,38 +346,26 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
last_ts = row[f'stage{last_stage}_timestamp']
if first_ts is None:
continue
- elif first_ts is not None and last_ts is not None:
+ elif last_ts is not None:
t = 1
transitions.append(t)
ic_present = False
- for issue_type_with_context in errors:
+ for error_id in all_issues:
+ if error_id not in errors:
+ errors[error_id] = []
ic = 0
- issue_type = errors[issue_type_with_context]["issue_type"]
- context = errors[issue_type_with_context]["context"]
- if row['issue_type'] is not None:
+ row_issue_id = row['issue_id']
+ if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
- context_in_row = row['issue_context'] if row['issue_context'] is not None else ''
- if issue_type == row['issue_type'] and context == context_in_row:
+ if error_id == row_issue_id:
ic = 1
ic_present = True
- errors[issue_type_with_context]["errors"].append(ic)
+ errors[error_id].append(ic)
if ic_present and t:
n_sess_affected += 1
- # def tuple_or(t: tuple):
- # x = 0
- # for el in t:
- # x |= el
- # return x
- def tuple_or(t: tuple):
- for el in t:
- if el > 0:
- return 1
- return 0
-
- errors = {key: errors[key]["errors"] for key in errors}
all_errors = [tuple_or(t) for t in zip(*errors.values())]
return transitions, errors, all_errors, n_sess_affected
@@ -377,10 +381,9 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
affected_users = defaultdict(lambda: set())
affected_sessions = defaultdict(lambda: set())
- contexts = defaultdict(lambda: None)
+ all_issues = {}
n_affected_users_dict = defaultdict(lambda: None)
n_affected_sessions_dict = defaultdict(lambda: None)
- all_issues_with_context = set()
n_issues_dict = defaultdict(lambda: 0)
issues_by_session = defaultdict(lambda: 0)
@@ -396,15 +399,13 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
# check that the issue exists and belongs to subfunnel:
if iss is not None and (row[f'stage{last_stage}_timestamp'] is None or
(row[f'stage{first_stage}_timestamp'] < iss_ts < row[f'stage{last_stage}_timestamp'])):
- context_string = row['issue_context'] if row['issue_context'] is not None else ''
- issue_with_context = iss + '__^__' + context_string
- contexts[issue_with_context] = {"context": context_string, "id": row["issue_id"]}
- all_issues_with_context.add(issue_with_context)
- n_issues_dict[issue_with_context] += 1
+ if row["issue_id"] not in all_issues:
+ all_issues[row["issue_id"]] = {"context": row['issue_context'], "issue_type": row["issue_type"]}
+ n_issues_dict[row["issue_id"]] += 1
if row['user_uuid'] is not None:
- affected_users[issue_with_context].add(row['user_uuid'])
+ affected_users[row["issue_id"]].add(row['user_uuid'])
- affected_sessions[issue_with_context].add(row['session_id'])
+ affected_sessions[row["issue_id"]].add(row['session_id'])
issues_by_session[row[f'session_id']] += 1
if len(affected_users) > 0:
@@ -415,29 +416,28 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
n_affected_sessions_dict.update({
iss: len(affected_sessions[iss]) for iss in affected_sessions
})
- return all_issues_with_context, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict, contexts
+ return all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
- for ind, row in enumerate(rows):
+ for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
session_counts[i].add(row[f"session_id"])
+
session_counts = {i: len(session_counts[i]) for i in session_counts}
return session_counts
def count_users(rows, n_stages):
- users_in_stages = defaultdict(lambda: set())
-
- for ind, row in enumerate(rows):
+ users_in_stages = {i: set() for i in range(1, n_stages + 1)}
+ for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
users_in_stages[i].add(row["user_uuid"])
users_count = {i: len(users_in_stages[i]) for i in range(1, n_stages + 1)}
-
return users_count
@@ -490,18 +490,18 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
last_stage = n_stages
n_critical_issues = 0
- issues_dict = dict({"significant": [],
- "insignificant": []})
+ issues_dict = {"significant": [],
+ "insignificant": []}
session_counts = count_sessions(rows, n_stages)
drop = session_counts[first_stage] - session_counts[last_stage]
- all_issues_with_context, n_issues_dict, affected_users_dict, affected_sessions, contexts = get_affected_users_for_all_issues(
+ all_issues, n_issues_dict, affected_users_dict, affected_sessions = get_affected_users_for_all_issues(
rows, first_stage, last_stage)
transitions, errors, all_errors, n_sess_affected = get_transitions_and_issues_of_each_type(rows,
- all_issues_with_context,
+ all_issues,
first_stage, last_stage)
- # print("len(transitions) =", len(transitions))
+ del rows
if any(all_errors):
total_drop_corr, conf, is_sign = pearson_corr(transitions, all_errors)
@@ -514,33 +514,35 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
if drop_only:
return total_drop_due_to_issues
- for issue in all_issues_with_context:
+ for issue_id in all_issues:
- if not any(errors[issue]):
+ if not any(errors[issue_id]):
continue
- r, confidence, is_sign = pearson_corr(transitions, errors[issue])
+ r, confidence, is_sign = pearson_corr(transitions, errors[issue_id])
if r is not None and drop is not None and is_sign:
- lost_conversions = int(r * affected_sessions[issue])
+ lost_conversions = int(r * affected_sessions[issue_id])
else:
lost_conversions = None
if r is None:
r = 0
- split = issue.split('__^__')
issues_dict['significant' if is_sign else 'insignificant'].append({
- "type": split[0],
- "title": helper.get_issue_title(split[0]),
- "affected_sessions": affected_sessions[issue],
- "unaffected_sessions": session_counts[1] - affected_sessions[issue],
+ "type": all_issues[issue_id]["issue_type"],
+ "title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
+ "affected_sessions": affected_sessions[issue_id],
+ "unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
"lost_conversions": lost_conversions,
- "affected_users": affected_users_dict[issue],
+ "affected_users": affected_users_dict[issue_id],
"conversion_impact": round(r * 100),
- "context_string": contexts[issue]["context"],
- "issue_id": contexts[issue]["id"]
+ "context_string": all_issues[issue_id]["context"],
+ "issue_id": issue_id
})
if is_sign:
- n_critical_issues += n_issues_dict[issue]
+ n_critical_issues += n_issues_dict[issue_id]
+    # Limit the number of issues returned to the frontend
+ issues_dict["significant"] = issues_dict["significant"][:20]
+ issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues
diff --git a/ee/api/requirements-alerts.txt b/ee/api/requirements-alerts.txt
index fce0ba6cc..02042a778 100644
--- a/ee/api/requirements-alerts.txt
+++ b/ee/api/requirements-alerts.txt
@@ -1,18 +1,18 @@
requests==2.28.1
urllib3==1.26.12
-boto3==1.26.4
+boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
-elasticsearch==8.5.0
+elasticsearch==8.5.1
jira==3.4.1
-fastapi==0.86.0
-uvicorn[standard]==0.19.0
+fastapi==0.87.0
+uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.10.2
-apscheduler==3.9.1
+apscheduler==3.9.1.post1
clickhouse-driver==0.2.4
python-multipart==0.0.5
\ No newline at end of file
diff --git a/ee/api/requirements-crons.txt b/ee/api/requirements-crons.txt
index fce0ba6cc..02042a778 100644
--- a/ee/api/requirements-crons.txt
+++ b/ee/api/requirements-crons.txt
@@ -1,18 +1,18 @@
requests==2.28.1
urllib3==1.26.12
-boto3==1.26.4
+boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
-elasticsearch==8.5.0
+elasticsearch==8.5.1
jira==3.4.1
-fastapi==0.86.0
-uvicorn[standard]==0.19.0
+fastapi==0.87.0
+uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.10.2
-apscheduler==3.9.1
+apscheduler==3.9.1.post1
clickhouse-driver==0.2.4
python-multipart==0.0.5
\ No newline at end of file
diff --git a/ee/api/requirements.txt b/ee/api/requirements.txt
index 23fc32fe7..ac4f27a9d 100644
--- a/ee/api/requirements.txt
+++ b/ee/api/requirements.txt
@@ -1,18 +1,18 @@
requests==2.28.1
urllib3==1.26.12
-boto3==1.26.4
+boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
-elasticsearch==8.5.0
+elasticsearch==8.5.1
jira==3.4.1
-fastapi==0.86.0
-uvicorn[standard]==0.19.0
+fastapi==0.87.0
+uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.10.2
-apscheduler==3.9.1
+apscheduler==3.9.1.post1
clickhouse-driver==0.2.4
python3-saml==1.14.0
diff --git a/ee/backend/pkg/kafka/consumer.go b/ee/backend/pkg/kafka/consumer.go
index b951fcd9c..bea1f0604 100644
--- a/ee/backend/pkg/kafka/consumer.go
+++ b/ee/backend/pkg/kafka/consumer.go
@@ -2,24 +2,24 @@ package kafka
import (
"log"
- "openreplay/backend/pkg/messages"
"os"
"time"
- "github.com/pkg/errors"
-
- "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
"openreplay/backend/pkg/env"
+ "openreplay/backend/pkg/messages"
+
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+ "github.com/pkg/errors"
)
type Message = kafka.Message
type Consumer struct {
- c *kafka.Consumer
- messageIterator messages.MessageIterator
- commitTicker *time.Ticker
- pollTimeout uint
-
+ c *kafka.Consumer
+ messageIterator messages.MessageIterator
+ commitTicker *time.Ticker
+ pollTimeout uint
+ events chan interface{}
lastReceivedPrtTs map[int32]int64
}
@@ -47,34 +47,64 @@ func NewConsumer(
kafkaConfig.SetKey("ssl.key.location", os.Getenv("KAFKA_SSL_KEY"))
kafkaConfig.SetKey("ssl.certificate.location", os.Getenv("KAFKA_SSL_CERT"))
}
+
+ // Apply Kerberos configuration
+ if env.Bool("KAFKA_USE_KERBEROS") {
+ kafkaConfig.SetKey("security.protocol", "sasl_plaintext")
+ kafkaConfig.SetKey("sasl.mechanisms", "GSSAPI")
+ kafkaConfig.SetKey("sasl.kerberos.service.name", os.Getenv("KERBEROS_SERVICE_NAME"))
+ kafkaConfig.SetKey("sasl.kerberos.principal", os.Getenv("KERBEROS_PRINCIPAL"))
+ kafkaConfig.SetKey("sasl.kerberos.keytab", os.Getenv("KERBEROS_KEYTAB_LOCATION"))
+ }
+
c, err := kafka.NewConsumer(kafkaConfig)
if err != nil {
log.Fatalln(err)
}
- subREx := "^("
- for i, t := range topics {
- if i != 0 {
- subREx += "|"
- }
- subREx += t
- }
- subREx += ")$"
- if err := c.Subscribe(subREx, nil); err != nil {
- log.Fatalln(err)
- }
var commitTicker *time.Ticker
if autoCommit {
commitTicker = time.NewTicker(2 * time.Minute)
}
- return &Consumer{
+ consumer := &Consumer{
c: c,
messageIterator: messageIterator,
commitTicker: commitTicker,
pollTimeout: 200,
- lastReceivedPrtTs: make(map[int32]int64),
+ events: make(chan interface{}, 4),
+ lastReceivedPrtTs: make(map[int32]int64, 16),
}
+
+ subREx := "^("
+ for i, t := range topics {
+ if i != 0 {
+ subREx += "|"
+ }
+ subREx += t
+ }
+ subREx += ")$"
+ if err := c.Subscribe(subREx, consumer.reBalanceCallback); err != nil {
+ log.Fatalln(err)
+ }
+
+ return consumer
+}
+
+func (consumer *Consumer) reBalanceCallback(_ *kafka.Consumer, e kafka.Event) error {
+ switch evt := e.(type) {
+ case kafka.RevokedPartitions:
+		// received before partitions are re-balanced; stop consuming messages and commit the current state
+ consumer.events <- evt.String()
+ case kafka.AssignedPartitions:
+		// received after partitions are re-balanced; continue consuming messages
+ //consumer.events <- evt.String()
+ }
+ return nil
+}
+
+func (consumer *Consumer) Rebalanced() <-chan interface{} {
+ return consumer.events
}
func (consumer *Consumer) Commit() error {
diff --git a/ee/backend/pkg/kafka/log.go b/ee/backend/pkg/kafka/log.go
index 0cd80cb6d..c71c6d2bd 100644
--- a/ee/backend/pkg/kafka/log.go
+++ b/ee/backend/pkg/kafka/log.go
@@ -1,16 +1,15 @@
package kafka
import (
- "log"
"fmt"
+ "log"
- "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
)
-
func logPartitions(s string, prts []kafka.TopicPartition) {
for _, p := range prts {
s = fmt.Sprintf("%v | %v", s, p.Partition)
}
log.Println(s)
-}
\ No newline at end of file
+}
diff --git a/ee/backend/pkg/kafka/producer.go b/ee/backend/pkg/kafka/producer.go
index 6fb893b7a..1ec241b8a 100644
--- a/ee/backend/pkg/kafka/producer.go
+++ b/ee/backend/pkg/kafka/producer.go
@@ -5,7 +5,7 @@ import (
"log"
"os"
- "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
"openreplay/backend/pkg/env"
)
@@ -30,6 +30,15 @@ func NewProducer(messageSizeLimit int, useBatch bool) *Producer {
kafkaConfig.SetKey("ssl.key.location", os.Getenv("KAFKA_SSL_KEY"))
kafkaConfig.SetKey("ssl.certificate.location", os.Getenv("KAFKA_SSL_CERT"))
}
+ // Apply Kerberos configuration
+ if env.Bool("KAFKA_USE_KERBEROS") {
+ kafkaConfig.SetKey("security.protocol", "sasl_plaintext")
+ kafkaConfig.SetKey("sasl.mechanisms", "GSSAPI")
+ kafkaConfig.SetKey("sasl.kerberos.service.name", os.Getenv("KERBEROS_SERVICE_NAME"))
+ kafkaConfig.SetKey("sasl.kerberos.principal", os.Getenv("KERBEROS_PRINCIPAL"))
+ kafkaConfig.SetKey("sasl.kerberos.keytab", os.Getenv("KERBEROS_KEYTAB_LOCATION"))
+ }
+
producer, err := kafka.NewProducer(kafkaConfig)
if err != nil {
log.Fatalln(err)
diff --git a/ee/connectors/deploy/requirements_snowflake.txt b/ee/connectors/deploy/requirements_snowflake.txt
index 983a313d6..895326b32 100644
--- a/ee/connectors/deploy/requirements_snowflake.txt
+++ b/ee/connectors/deploy/requirements_snowflake.txt
@@ -1,8 +1,8 @@
pandas==1.5.1
kafka-python==2.0.2
SQLAlchemy==1.4.43
-snowflake-connector-python==2.8.1
-snowflake-sqlalchemy==1.4.3
+snowflake-connector-python==2.8.2
+snowflake-sqlalchemy==1.4.4
PyYAML
asn1crypto==1.5.1
azure-common==1.1.28
diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.9.0/1.9.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.9.0/1.9.0.sql
index 3315df03f..c3483579d 100644
--- a/ee/scripts/schema/db/init_dbs/postgresql/1.9.0/1.9.0.sql
+++ b/ee/scripts/schema/db/init_dbs/postgresql/1.9.0/1.9.0.sql
@@ -74,4 +74,16 @@ DROP INDEX IF EXISTS events_common.requests_url_idx;
DROP INDEX IF EXISTS events_common.requests_url_gin_idx;
DROP INDEX IF EXISTS events_common.requests_url_gin_idx2;
-COMMIT;
\ No newline at end of file
+DROP INDEX IF EXISTS events.resources_url_gin_idx;
+DROP INDEX IF EXISTS events.resources_url_idx;
+
+UPDATE metrics
+SET default_config=default_config || '{
+ "col": 4
+}'::jsonb
+WHERE NOT is_predefined
+ AND (metric_type = 'funnel' OR (metric_type = 'table' AND metric_of IN ('SESSIONS', 'js_exception')));
+
+COMMIT;
+
+CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
\ No newline at end of file
diff --git a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql
index 17ff578d3..67e195b96 100644
--- a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql
+++ b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql
@@ -1229,6 +1229,7 @@ $$
CREATE INDEX IF NOT EXISTS requests_request_body_nn_gin_idx ON events_common.requests USING GIN (request_body gin_trgm_ops) WHERE request_body IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_response_body_nn_gin_idx ON events_common.requests USING GIN (response_body gin_trgm_ops) WHERE response_body IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_status_code_nn_idx ON events_common.requests (status_code) WHERE status_code IS NOT NULL;
+ CREATE INDEX IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_host_nn_idx ON events_common.requests (host) WHERE host IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_host_nn_gin_idx ON events_common.requests USING GIN (host gin_trgm_ops) WHERE host IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_path_nn_idx ON events_common.requests (path) WHERE path IS NOT NULL;
diff --git a/ee/utilities/package-lock.json b/ee/utilities/package-lock.json
index ace7a2109..ce7002fee 100644
--- a/ee/utilities/package-lock.json
+++ b/ee/utilities/package-lock.json
@@ -1,7 +1,7 @@
{
"name": "utilities-server",
"version": "1.0.0",
- "lockfileVersion": 2,
+ "lockfileVersion": 3,
"requires": true,
"packages": {
"": {
@@ -20,31 +20,30 @@
}
},
"node_modules/@maxmind/geoip2-node": {
- "version": "3.4.0",
- "resolved": "https://registry.npmjs.org/@maxmind/geoip2-node/-/geoip2-node-3.4.0.tgz",
- "integrity": "sha512-XBB+IJSXQRXXHBvwULZu2nOYAPuC0pc77xw/xkDo0cWkBO/L2rUMr+xKGZwj47mFBEiG6tnTMBzxJajEJTrKAg==",
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/@maxmind/geoip2-node/-/geoip2-node-3.5.0.tgz",
+ "integrity": "sha512-WG2TNxMwDWDOrljLwyZf5bwiEYubaHuICvQRlgz74lE9OZA/z4o+ZT6OisjDBAZh/yRJVNK6mfHqmP5lLlAwsA==",
"dependencies": {
"camelcase-keys": "^7.0.0",
"ip6addr": "^0.2.5",
- "lodash.set": "^4.3.2",
"maxmind": "^4.2.0"
}
},
"node_modules/@redis/bloom": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
- "integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.1.0.tgz",
+ "integrity": "sha512-9QovlxmpRtvxVbN0UBcv8WfdSMudNZZTFqCsnBszcQXqaZb/TVe30ScgGEO7u1EAIacTPAo7/oCYjYAxiHLanQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
- "integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.4.2.tgz",
+ "integrity": "sha512-oUdEjE0I7JS5AyaAjkD3aOXn9NhO7XKyPyXEyrgFDu++VrVBHUPnV6dgEya9TcMuj5nIJRuCzCm8ZP+c9zCHPw==",
"dependencies": {
- "cluster-key-slot": "1.1.0",
- "generic-pool": "3.8.2",
+ "cluster-key-slot": "1.1.1",
+ "generic-pool": "3.9.0",
"yallist": "4.0.0"
},
"engines": {
@@ -52,37 +51,42 @@
}
},
"node_modules/@redis/graph": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
- "integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.0.tgz",
+ "integrity": "sha512-16yZWngxyXPd+MJxeSr0dqh2AIOi8j9yXKcKCwVaKDbH3HTuETpDVPcLujhFYVPtYrngSco31BUcSa9TH31Gqg==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
- "integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.4.tgz",
+ "integrity": "sha512-LUZE2Gdrhg0Rx7AN+cZkb1e6HjoSKaeeW8rYnt89Tly13GBI5eP4CwDVr+MY8BAYfCg4/N15OUrtLoona9uSgw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
- "integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.1.0.tgz",
+ "integrity": "sha512-NyFZEVnxIJEybpy+YskjgOJRNsfTYqaPbK/Buv6W2kmFNaRk85JiqjJZA5QkRmWvGbyQYwoO5QfDi2wHskKrQQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
- "integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.4.tgz",
+ "integrity": "sha512-ThUIgo2U/g7cCuZavucQTQzA9g9JbDDY2f64u3AbAoz/8vE2lt2U37LamDUVChhaDA3IRT9R6VvJwqnUfTJzng==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
+ "node_modules/@socket.io/component-emitter": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz",
+ "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg=="
+ },
"node_modules/@socket.io/redis-adapter": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-7.2.0.tgz",
@@ -97,11 +101,6 @@
"node": ">=10.0.0"
}
},
- "node_modules/@types/component-emitter": {
- "version": "1.2.11",
- "resolved": "https://registry.npmjs.org/@types/component-emitter/-/component-emitter-1.2.11.tgz",
- "integrity": "sha512-SRXjM+tfsSlA9VuG8hGO2nft2p8zjXCK1VcC6N4NXbBbYbSia9kzCChYQajIjzIqOOOuh5Ock6MmV2oux4jDZQ=="
- },
"node_modules/@types/cookie": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz",
@@ -113,9 +112,9 @@
"integrity": "sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw=="
},
"node_modules/@types/node": {
- "version": "18.6.1",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-18.6.1.tgz",
- "integrity": "sha512-z+2vB6yDt1fNwKOeGbckpmirO+VBDuQqecXkgeIqDlaOtmKn6hPR/viQ8cxCfqLU4fTlvM3+YjM367TukWdxpg=="
+ "version": "18.11.9",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
+ "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg=="
},
"node_modules/accepts": {
"version": "1.3.8",
@@ -151,9 +150,9 @@
}
},
"node_modules/body-parser": {
- "version": "1.20.0",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz",
- "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==",
+ "version": "1.20.1",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz",
+ "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.4",
@@ -163,7 +162,7 @@
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
- "qs": "6.10.3",
+ "qs": "6.11.0",
"raw-body": "2.5.1",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
@@ -240,18 +239,13 @@
}
},
"node_modules/cluster-key-slot": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
- "integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.1.tgz",
+ "integrity": "sha512-rwHwUfXL40Chm1r08yrhU3qpUvdVlgkKNeyeGPOxnW8/SyVDvgRaed/Uz54AqWNaTCAThlj6QAs3TZcKI0xDEw==",
"engines": {
"node": ">=0.10.0"
}
},
- "node_modules/component-emitter": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
- "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
- },
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
@@ -356,9 +350,9 @@
}
},
"node_modules/engine.io": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.0.tgz",
- "integrity": "sha512-4KzwW3F3bk+KlzSOY57fj/Jx6LyRQ1nbcyIadehl+AnXjKT7gDO0ORdRi/84ixvMKTym6ZKuxvbzN62HDDU1Lg==",
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.1.tgz",
+ "integrity": "sha512-ECceEFcAaNRybd3lsGQKas3ZlMVjN3cyWwMP25D2i0zWfyiytVbTpRPa34qrr+FHddtpBVOmq4H/DCv1O0lZRA==",
"dependencies": {
"@types/cookie": "^0.4.1",
"@types/cors": "^2.8.12",
@@ -405,13 +399,13 @@
}
},
"node_modules/express": {
- "version": "4.18.1",
- "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz",
- "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==",
+ "version": "4.18.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz",
+ "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
- "body-parser": "1.20.0",
+ "body-parser": "1.20.1",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.5.0",
@@ -430,7 +424,7 @@
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.7",
"proxy-addr": "~2.0.7",
- "qs": "6.10.3",
+ "qs": "6.11.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.18.0",
@@ -518,17 +512,17 @@
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
},
"node_modules/generic-pool": {
- "version": "3.8.2",
- "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
- "integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz",
+ "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==",
"engines": {
"node": ">= 4"
}
},
"node_modules/get-intrinsic": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.2.tgz",
- "integrity": "sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA==",
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz",
+ "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==",
"dependencies": {
"function-bind": "^1.1.1",
"has": "^1.0.3",
@@ -702,11 +696,6 @@
"resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz",
"integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg=="
},
- "node_modules/lodash.set": {
- "version": "4.3.2",
- "resolved": "https://registry.npmjs.org/lodash.set/-/lodash.set-4.3.2.tgz",
- "integrity": "sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg=="
- },
"node_modules/map-obj": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
@@ -719,15 +708,15 @@
}
},
"node_modules/maxmind": {
- "version": "4.3.6",
- "resolved": "https://registry.npmjs.org/maxmind/-/maxmind-4.3.6.tgz",
- "integrity": "sha512-CwnEZqJX0T6b2rWrc0/V3n9hL/hWAMEn7fY09077YJUHiHx7cn/esA2ZIz8BpYLSJUf7cGVel0oUJa9jMwyQpg==",
+ "version": "4.3.8",
+ "resolved": "https://registry.npmjs.org/maxmind/-/maxmind-4.3.8.tgz",
+ "integrity": "sha512-HrfxEu5yPBPtTy/OT+W5bPQwEfLUX0EHqe2EbJiB47xQMumHqXvSP7PAwzV8Z++NRCmQwy4moQrTSt0+dH+Jmg==",
"dependencies": {
"mmdb-lib": "2.0.2",
- "tiny-lru": "8.0.2"
+ "tiny-lru": "9.0.3"
},
"engines": {
- "node": ">=10",
+ "node": ">=12",
"npm": ">=6"
}
},
@@ -862,9 +851,9 @@
}
},
"node_modules/qs": {
- "version": "6.10.3",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz",
- "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==",
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
"dependencies": {
"side-channel": "^1.0.4"
},
@@ -909,16 +898,16 @@
}
},
"node_modules/redis": {
- "version": "4.2.0",
- "resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
- "integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
+ "version": "4.5.1",
+ "resolved": "https://registry.npmjs.org/redis/-/redis-4.5.1.tgz",
+ "integrity": "sha512-oxXSoIqMJCQVBTfxP6BNTCtDMyh9G6Vi5wjdPdV/sRKkufyZslDqCScSGcOr6XGR/reAWZefz7E4leM31RgdBA==",
"dependencies": {
- "@redis/bloom": "1.0.2",
- "@redis/client": "1.2.0",
- "@redis/graph": "1.0.1",
- "@redis/json": "1.0.3",
- "@redis/search": "1.0.6",
- "@redis/time-series": "1.0.3"
+ "@redis/bloom": "1.1.0",
+ "@redis/client": "1.4.2",
+ "@redis/graph": "1.1.0",
+ "@redis/json": "1.0.4",
+ "@redis/search": "1.1.0",
+ "@redis/time-series": "1.0.4"
}
},
"node_modules/safe-buffer": {
@@ -1027,16 +1016,16 @@
}
},
"node_modules/socket.io": {
- "version": "4.5.1",
- "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.5.1.tgz",
- "integrity": "sha512-0y9pnIso5a9i+lJmsCdtmTTgJFFSvNQKDnPQRz28mGNnxbmqYg2QPtJTLFxhymFZhAIn50eHAKzJeiNaKr+yUQ==",
+ "version": "4.5.4",
+ "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.5.4.tgz",
+ "integrity": "sha512-m3GC94iK9MfIEeIBfbhJs5BqFibMtkRk8ZpKwG2QwxV0m/eEhPIV4ara6XCF1LWNAus7z58RodiZlAH71U3EhQ==",
"dependencies": {
"accepts": "~1.3.4",
"base64id": "~2.0.0",
"debug": "~4.3.2",
- "engine.io": "~6.2.0",
+ "engine.io": "~6.2.1",
"socket.io-adapter": "~2.4.0",
- "socket.io-parser": "~4.0.4"
+ "socket.io-parser": "~4.2.1"
},
"engines": {
"node": ">=10.0.0"
@@ -1048,12 +1037,11 @@
"integrity": "sha512-W4N+o69rkMEGVuk2D/cvca3uYsvGlMwsySWV447y99gUPghxq42BxqLNMndb+a1mm/5/7NeXVQS7RLa2XyXvYg=="
},
"node_modules/socket.io-parser": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.5.tgz",
- "integrity": "sha512-sNjbT9dX63nqUFIOv95tTVm6elyIU4RvB1m8dOeZt+IgWwcWklFDOdmGcfo3zSiRsnR/3pJkjY5lfoGqEe4Eig==",
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.1.tgz",
+ "integrity": "sha512-V4GrkLy+HeF1F/en3SpUaM+7XxYXpuMUWLGde1kSSh5nQMN4hLrbPIkD+otwh6q9R6NOQBN4AMaOZ2zVjui82g==",
"dependencies": {
- "@types/component-emitter": "^1.2.10",
- "component-emitter": "~1.3.0",
+ "@socket.io/component-emitter": "~3.1.0",
"debug": "~4.3.1"
},
"engines": {
@@ -1069,9 +1057,9 @@
}
},
"node_modules/tiny-lru": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-8.0.2.tgz",
- "integrity": "sha512-ApGvZ6vVvTNdsmt676grvCkUCGwzG9IqXma5Z07xJgiC5L7akUMof5U8G2JTI9Rz/ovtVhJBlY6mNhEvtjzOIg==",
+ "version": "9.0.3",
+ "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-9.0.3.tgz",
+ "integrity": "sha512-/i9GruRjXsnDgehxvy6iZ4AFNVxngEFbwzirhdulomMNPGPVV3ECMZOWSw0w4sRMZ9Al9m4jy08GPvRxRUGYlw==",
"engines": {
"node": ">=6"
}
@@ -1108,9 +1096,9 @@
}
},
"node_modules/ua-parser-js": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.2.tgz",
- "integrity": "sha512-00y/AXhx0/SsnI51fTc0rLRmafiGOM4/O+ny10Ps7f+j/b8p/ZY11ytMgznXkOVo4GQ+KwQG5UQLkLGirsACRg==",
+ "version": "1.0.32",
+ "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.32.tgz",
+ "integrity": "sha512-dXVsz3M4j+5tTiovFVyVqssXBu5HM47//YSOeZ9fQkdDKkfzv2v3PP1jmH6FUyPW+yCSn7aBVK1fGGKNhowdDA==",
"funding": [
{
"type": "opencollective",
@@ -1196,901 +1184,5 @@
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
- },
- "dependencies": {
- "@maxmind/geoip2-node": {
- "version": "3.4.0",
- "resolved": "https://registry.npmjs.org/@maxmind/geoip2-node/-/geoip2-node-3.4.0.tgz",
- "integrity": "sha512-XBB+IJSXQRXXHBvwULZu2nOYAPuC0pc77xw/xkDo0cWkBO/L2rUMr+xKGZwj47mFBEiG6tnTMBzxJajEJTrKAg==",
- "requires": {
- "camelcase-keys": "^7.0.0",
- "ip6addr": "^0.2.5",
- "lodash.set": "^4.3.2",
- "maxmind": "^4.2.0"
- }
- },
- "@redis/bloom": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
- "integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
- "requires": {}
- },
- "@redis/client": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
- "integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
- "requires": {
- "cluster-key-slot": "1.1.0",
- "generic-pool": "3.8.2",
- "yallist": "4.0.0"
- }
- },
- "@redis/graph": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
- "integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
- "requires": {}
- },
- "@redis/json": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
- "integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
- "requires": {}
- },
- "@redis/search": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
- "integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
- "requires": {}
- },
- "@redis/time-series": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
- "integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
- "requires": {}
- },
- "@socket.io/redis-adapter": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-7.2.0.tgz",
- "integrity": "sha512-/r6oF6Myz0K9uatB/pfCi0BhKg/KRMh1OokrqcjlNz6aq40WiXdFLRbHJQuwGHq/KvB+D6141K+IynbVxZGvhw==",
- "requires": {
- "debug": "~4.3.1",
- "notepack.io": "~2.2.0",
- "socket.io-adapter": "^2.4.0",
- "uid2": "0.0.3"
- }
- },
- "@types/component-emitter": {
- "version": "1.2.11",
- "resolved": "https://registry.npmjs.org/@types/component-emitter/-/component-emitter-1.2.11.tgz",
- "integrity": "sha512-SRXjM+tfsSlA9VuG8hGO2nft2p8zjXCK1VcC6N4NXbBbYbSia9kzCChYQajIjzIqOOOuh5Ock6MmV2oux4jDZQ=="
- },
- "@types/cookie": {
- "version": "0.4.1",
- "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz",
- "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q=="
- },
- "@types/cors": {
- "version": "2.8.12",
- "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.12.tgz",
- "integrity": "sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw=="
- },
- "@types/node": {
- "version": "18.6.1",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-18.6.1.tgz",
- "integrity": "sha512-z+2vB6yDt1fNwKOeGbckpmirO+VBDuQqecXkgeIqDlaOtmKn6hPR/viQ8cxCfqLU4fTlvM3+YjM367TukWdxpg=="
- },
- "accepts": {
- "version": "1.3.8",
- "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
- "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
- "requires": {
- "mime-types": "~2.1.34",
- "negotiator": "0.6.3"
- }
- },
- "array-flatten": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
- "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
- },
- "assert-plus": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
- "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw=="
- },
- "base64id": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
- "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog=="
- },
- "body-parser": {
- "version": "1.20.0",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz",
- "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==",
- "requires": {
- "bytes": "3.1.2",
- "content-type": "~1.0.4",
- "debug": "2.6.9",
- "depd": "2.0.0",
- "destroy": "1.2.0",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "on-finished": "2.4.1",
- "qs": "6.10.3",
- "raw-body": "2.5.1",
- "type-is": "~1.6.18",
- "unpipe": "1.0.0"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
- }
- }
- },
- "buffer-equal-constant-time": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
- "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="
- },
- "bytes": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
- "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="
- },
- "call-bind": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
- "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
- "requires": {
- "function-bind": "^1.1.1",
- "get-intrinsic": "^1.0.2"
- }
- },
- "camelcase": {
- "version": "6.3.0",
- "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
- "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA=="
- },
- "camelcase-keys": {
- "version": "7.0.2",
- "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-7.0.2.tgz",
- "integrity": "sha512-Rjs1H+A9R+Ig+4E/9oyB66UC5Mj9Xq3N//vcLf2WzgdTi/3gUu3Z9KoqmlrEG4VuuLK8wJHofxzdQXz/knhiYg==",
- "requires": {
- "camelcase": "^6.3.0",
- "map-obj": "^4.1.0",
- "quick-lru": "^5.1.1",
- "type-fest": "^1.2.1"
- }
- },
- "cluster-key-slot": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
- "integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
- },
- "component-emitter": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
- "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
- },
- "content-disposition": {
- "version": "0.5.4",
- "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
- "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
- "requires": {
- "safe-buffer": "5.2.1"
- }
- },
- "content-type": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
- "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
- },
- "cookie": {
- "version": "0.5.0",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz",
- "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw=="
- },
- "cookie-signature": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
- "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
- },
- "core-util-is": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
- "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ=="
- },
- "cors": {
- "version": "2.8.5",
- "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
- "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
- "requires": {
- "object-assign": "^4",
- "vary": "^1"
- }
- },
- "debug": {
- "version": "4.3.4",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
- "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
- "requires": {
- "ms": "2.1.2"
- }
- },
- "depd": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
- "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="
- },
- "destroy": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
- "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg=="
- },
- "ecdsa-sig-formatter": {
- "version": "1.0.11",
- "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
- "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==",
- "requires": {
- "safe-buffer": "^5.0.1"
- }
- },
- "ee-first": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
- "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
- },
- "encodeurl": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
- "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w=="
- },
- "engine.io": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.0.tgz",
- "integrity": "sha512-4KzwW3F3bk+KlzSOY57fj/Jx6LyRQ1nbcyIadehl+AnXjKT7gDO0ORdRi/84ixvMKTym6ZKuxvbzN62HDDU1Lg==",
- "requires": {
- "@types/cookie": "^0.4.1",
- "@types/cors": "^2.8.12",
- "@types/node": ">=10.0.0",
- "accepts": "~1.3.4",
- "base64id": "2.0.0",
- "cookie": "~0.4.1",
- "cors": "~2.8.5",
- "debug": "~4.3.1",
- "engine.io-parser": "~5.0.3",
- "ws": "~8.2.3"
- },
- "dependencies": {
- "cookie": {
- "version": "0.4.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz",
- "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA=="
- }
- }
- },
- "engine.io-parser": {
- "version": "5.0.4",
- "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.4.tgz",
- "integrity": "sha512-+nVFp+5z1E3HcToEnO7ZIj3g+3k9389DvWtvJZz0T6/eOCPIyyxehFcedoYrZQrp0LgQbD9pPXhpMBKMd5QURg=="
- },
- "escape-html": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
- "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
- },
- "etag": {
- "version": "1.8.1",
- "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
- "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="
- },
- "express": {
- "version": "4.18.1",
- "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz",
- "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==",
- "requires": {
- "accepts": "~1.3.8",
- "array-flatten": "1.1.1",
- "body-parser": "1.20.0",
- "content-disposition": "0.5.4",
- "content-type": "~1.0.4",
- "cookie": "0.5.0",
- "cookie-signature": "1.0.6",
- "debug": "2.6.9",
- "depd": "2.0.0",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "finalhandler": "1.2.0",
- "fresh": "0.5.2",
- "http-errors": "2.0.0",
- "merge-descriptors": "1.0.1",
- "methods": "~1.1.2",
- "on-finished": "2.4.1",
- "parseurl": "~1.3.3",
- "path-to-regexp": "0.1.7",
- "proxy-addr": "~2.0.7",
- "qs": "6.10.3",
- "range-parser": "~1.2.1",
- "safe-buffer": "5.2.1",
- "send": "0.18.0",
- "serve-static": "1.15.0",
- "setprototypeof": "1.2.0",
- "statuses": "2.0.1",
- "type-is": "~1.6.18",
- "utils-merge": "1.0.1",
- "vary": "~1.1.2"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
- }
- }
- },
- "extsprintf": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
- "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g=="
- },
- "finalhandler": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
- "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
- "requires": {
- "debug": "2.6.9",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "on-finished": "2.4.1",
- "parseurl": "~1.3.3",
- "statuses": "2.0.1",
- "unpipe": "~1.0.0"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
- }
- }
- },
- "forwarded": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
- "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="
- },
- "fresh": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
- "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q=="
- },
- "function-bind": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
- "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
- },
- "generic-pool": {
- "version": "3.8.2",
- "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
- "integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
- },
- "get-intrinsic": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.2.tgz",
- "integrity": "sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA==",
- "requires": {
- "function-bind": "^1.1.1",
- "has": "^1.0.3",
- "has-symbols": "^1.0.3"
- }
- },
- "has": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
- "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
- "requires": {
- "function-bind": "^1.1.1"
- }
- },
- "has-symbols": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
- "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A=="
- },
- "http-errors": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
- "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
- "requires": {
- "depd": "2.0.0",
- "inherits": "2.0.4",
- "setprototypeof": "1.2.0",
- "statuses": "2.0.1",
- "toidentifier": "1.0.1"
- }
- },
- "iconv-lite": {
- "version": "0.4.24",
- "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
- "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
- "requires": {
- "safer-buffer": ">= 2.1.2 < 3"
- }
- },
- "inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
- },
- "ip6addr": {
- "version": "0.2.5",
- "resolved": "https://registry.npmjs.org/ip6addr/-/ip6addr-0.2.5.tgz",
- "integrity": "sha512-9RGGSB6Zc9Ox5DpDGFnJdIeF0AsqXzdH+FspCfPPaU/L/4tI6P+5lIoFUFm9JXs9IrJv1boqAaNCQmoDADTSKQ==",
- "requires": {
- "assert-plus": "^1.0.0",
- "jsprim": "^2.0.2"
- }
- },
- "ipaddr.js": {
- "version": "1.9.1",
- "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
- "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="
- },
- "json-schema": {
- "version": "0.4.0",
- "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz",
- "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="
- },
- "jsonwebtoken": {
- "version": "8.5.1",
- "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz",
- "integrity": "sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==",
- "requires": {
- "jws": "^3.2.2",
- "lodash.includes": "^4.3.0",
- "lodash.isboolean": "^3.0.3",
- "lodash.isinteger": "^4.0.4",
- "lodash.isnumber": "^3.0.3",
- "lodash.isplainobject": "^4.0.6",
- "lodash.isstring": "^4.0.1",
- "lodash.once": "^4.0.0",
- "ms": "^2.1.1",
- "semver": "^5.6.0"
- }
- },
- "jsprim": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz",
- "integrity": "sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ==",
- "requires": {
- "assert-plus": "1.0.0",
- "extsprintf": "1.3.0",
- "json-schema": "0.4.0",
- "verror": "1.10.0"
- }
- },
- "jwa": {
- "version": "1.4.1",
- "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz",
- "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==",
- "requires": {
- "buffer-equal-constant-time": "1.0.1",
- "ecdsa-sig-formatter": "1.0.11",
- "safe-buffer": "^5.0.1"
- }
- },
- "jws": {
- "version": "3.2.2",
- "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz",
- "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==",
- "requires": {
- "jwa": "^1.4.1",
- "safe-buffer": "^5.0.1"
- }
- },
- "lodash.includes": {
- "version": "4.3.0",
- "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz",
- "integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w=="
- },
- "lodash.isboolean": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz",
- "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg=="
- },
- "lodash.isinteger": {
- "version": "4.0.4",
- "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz",
- "integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA=="
- },
- "lodash.isnumber": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz",
- "integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw=="
- },
- "lodash.isplainobject": {
- "version": "4.0.6",
- "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
- "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="
- },
- "lodash.isstring": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
- "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw=="
- },
- "lodash.once": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz",
- "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg=="
- },
- "lodash.set": {
- "version": "4.3.2",
- "resolved": "https://registry.npmjs.org/lodash.set/-/lodash.set-4.3.2.tgz",
- "integrity": "sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg=="
- },
- "map-obj": {
- "version": "4.3.0",
- "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
- "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ=="
- },
- "maxmind": {
- "version": "4.3.6",
- "resolved": "https://registry.npmjs.org/maxmind/-/maxmind-4.3.6.tgz",
- "integrity": "sha512-CwnEZqJX0T6b2rWrc0/V3n9hL/hWAMEn7fY09077YJUHiHx7cn/esA2ZIz8BpYLSJUf7cGVel0oUJa9jMwyQpg==",
- "requires": {
- "mmdb-lib": "2.0.2",
- "tiny-lru": "8.0.2"
- }
- },
- "media-typer": {
- "version": "0.3.0",
- "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
- "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ=="
- },
- "merge-descriptors": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
- "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w=="
- },
- "methods": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
- "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w=="
- },
- "mime": {
- "version": "1.6.0",
- "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
- "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
- },
- "mime-db": {
- "version": "1.52.0",
- "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
- "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
- },
- "mime-types": {
- "version": "2.1.35",
- "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
- "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
- "requires": {
- "mime-db": "1.52.0"
- }
- },
- "mmdb-lib": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/mmdb-lib/-/mmdb-lib-2.0.2.tgz",
- "integrity": "sha512-shi1I+fCPQonhTi7qyb6hr7hi87R7YS69FlfJiMFuJ12+grx0JyL56gLNzGTYXPU7EhAPkMLliGeyHer0K+AVA=="
- },
- "ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
- },
- "negotiator": {
- "version": "0.6.3",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
- "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg=="
- },
- "notepack.io": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-2.2.0.tgz",
- "integrity": "sha512-9b5w3t5VSH6ZPosoYnyDONnUTF8o0UkBw7JLA6eBlYJWyGT1Q3vQa8Hmuj1/X6RYvHjjygBDgw6fJhe0JEojfw=="
- },
- "object-assign": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
- "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="
- },
- "object-inspect": {
- "version": "1.12.2",
- "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz",
- "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ=="
- },
- "on-finished": {
- "version": "2.4.1",
- "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
- "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
- "requires": {
- "ee-first": "1.1.1"
- }
- },
- "parseurl": {
- "version": "1.3.3",
- "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
- "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
- },
- "path-to-regexp": {
- "version": "0.1.7",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
- "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ=="
- },
- "proxy-addr": {
- "version": "2.0.7",
- "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
- "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
- "requires": {
- "forwarded": "0.2.0",
- "ipaddr.js": "1.9.1"
- }
- },
- "qs": {
- "version": "6.10.3",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz",
- "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==",
- "requires": {
- "side-channel": "^1.0.4"
- }
- },
- "quick-lru": {
- "version": "5.1.1",
- "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz",
- "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA=="
- },
- "range-parser": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
- "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
- },
- "raw-body": {
- "version": "2.5.1",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz",
- "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==",
- "requires": {
- "bytes": "3.1.2",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "unpipe": "1.0.0"
- }
- },
- "redis": {
- "version": "4.2.0",
- "resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
- "integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
- "requires": {
- "@redis/bloom": "1.0.2",
- "@redis/client": "1.2.0",
- "@redis/graph": "1.0.1",
- "@redis/json": "1.0.3",
- "@redis/search": "1.0.6",
- "@redis/time-series": "1.0.3"
- }
- },
- "safe-buffer": {
- "version": "5.2.1",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
- "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
- },
- "safer-buffer": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
- "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
- },
- "semver": {
- "version": "5.7.1",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
- "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
- },
- "send": {
- "version": "0.18.0",
- "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
- "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
- "requires": {
- "debug": "2.6.9",
- "depd": "2.0.0",
- "destroy": "1.2.0",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "fresh": "0.5.2",
- "http-errors": "2.0.0",
- "mime": "1.6.0",
- "ms": "2.1.3",
- "on-finished": "2.4.1",
- "range-parser": "~1.2.1",
- "statuses": "2.0.1"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- },
- "dependencies": {
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
- }
- }
- },
- "ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
- }
- }
- },
- "serve-static": {
- "version": "1.15.0",
- "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz",
- "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==",
- "requires": {
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "parseurl": "~1.3.3",
- "send": "0.18.0"
- }
- },
- "setprototypeof": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
- "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
- },
- "side-channel": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
- "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
- "requires": {
- "call-bind": "^1.0.0",
- "get-intrinsic": "^1.0.2",
- "object-inspect": "^1.9.0"
- }
- },
- "socket.io": {
- "version": "4.5.1",
- "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.5.1.tgz",
- "integrity": "sha512-0y9pnIso5a9i+lJmsCdtmTTgJFFSvNQKDnPQRz28mGNnxbmqYg2QPtJTLFxhymFZhAIn50eHAKzJeiNaKr+yUQ==",
- "requires": {
- "accepts": "~1.3.4",
- "base64id": "~2.0.0",
- "debug": "~4.3.2",
- "engine.io": "~6.2.0",
- "socket.io-adapter": "~2.4.0",
- "socket.io-parser": "~4.0.4"
- }
- },
- "socket.io-adapter": {
- "version": "2.4.0",
- "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.4.0.tgz",
- "integrity": "sha512-W4N+o69rkMEGVuk2D/cvca3uYsvGlMwsySWV447y99gUPghxq42BxqLNMndb+a1mm/5/7NeXVQS7RLa2XyXvYg=="
- },
- "socket.io-parser": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.5.tgz",
- "integrity": "sha512-sNjbT9dX63nqUFIOv95tTVm6elyIU4RvB1m8dOeZt+IgWwcWklFDOdmGcfo3zSiRsnR/3pJkjY5lfoGqEe4Eig==",
- "requires": {
- "@types/component-emitter": "^1.2.10",
- "component-emitter": "~1.3.0",
- "debug": "~4.3.1"
- }
- },
- "statuses": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
- "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="
- },
- "tiny-lru": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-8.0.2.tgz",
- "integrity": "sha512-ApGvZ6vVvTNdsmt676grvCkUCGwzG9IqXma5Z07xJgiC5L7akUMof5U8G2JTI9Rz/ovtVhJBlY6mNhEvtjzOIg=="
- },
- "toidentifier": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
- "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="
- },
- "type-fest": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz",
- "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA=="
- },
- "type-is": {
- "version": "1.6.18",
- "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
- "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
- "requires": {
- "media-typer": "0.3.0",
- "mime-types": "~2.1.24"
- }
- },
- "ua-parser-js": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.2.tgz",
- "integrity": "sha512-00y/AXhx0/SsnI51fTc0rLRmafiGOM4/O+ny10Ps7f+j/b8p/ZY11ytMgznXkOVo4GQ+KwQG5UQLkLGirsACRg=="
- },
- "uid2": {
- "version": "0.0.3",
- "resolved": "https://registry.npmjs.org/uid2/-/uid2-0.0.3.tgz",
- "integrity": "sha512-5gSP1liv10Gjp8cMEnFd6shzkL/D6W1uhXSFNCxDC+YI8+L8wkCYCbJ7n77Ezb4wE/xzMogecE+DtamEe9PZjg=="
- },
- "unpipe": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
- "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="
- },
- "utils-merge": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
- "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA=="
- },
- "uWebSockets.js": {
- "version": "git+ssh://git@github.com/uNetworking/uWebSockets.js.git#806df48c9da86af7b3341f3e443388c7cd15c3de",
- "from": "uWebSockets.js@github:uNetworking/uWebSockets.js#v20.10.0"
- },
- "vary": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
- "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="
- },
- "verror": {
- "version": "1.10.0",
- "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
- "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==",
- "requires": {
- "assert-plus": "^1.0.0",
- "core-util-is": "1.0.2",
- "extsprintf": "^1.2.0"
- }
- },
- "ws": {
- "version": "8.2.3",
- "resolved": "https://registry.npmjs.org/ws/-/ws-8.2.3.tgz",
- "integrity": "sha512-wBuoj1BDpC6ZQ1B7DWQBYVLphPWkm8i9Y0/3YdHjHKHiohOJ1ws+3OccDWtH+PoC9DZD5WOTrJvNbWvjS6JWaA==",
- "requires": {}
- },
- "yallist": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
- "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
- }
}
}
diff --git a/frontend/.env.sample b/frontend/.env.sample
index c5f9530a6..d5337deee 100644
--- a/frontend/.env.sample
+++ b/frontend/.env.sample
@@ -23,4 +23,4 @@ MINIO_SECRET_KEY = ''
# APP and TRACKER VERSIONS
VERSION = '1.9.0'
-TRACKER_VERSION = '4.1.6'
+TRACKER_VERSION = '4.1.9'
diff --git a/frontend/app/Router.js b/frontend/app/Router.js
index 1e4f4b4b7..6a4aea446 100644
--- a/frontend/app/Router.js
+++ b/frontend/app/Router.js
@@ -126,8 +126,9 @@ class Router extends React.Component {
}
fetchInitialData = async () => {
+ const siteIdFromPath = parseInt(window.location.pathname.split("/")[1])
await this.props.fetchUserInfo()
- await this.props.fetchSiteList()
+ await this.props.fetchSiteList(siteIdFromPath)
const { mstore } = this.props;
mstore.initClient();
};
diff --git a/frontend/app/components/Assist/ChatControls/ChatControls.tsx b/frontend/app/components/Assist/ChatControls/ChatControls.tsx
index 625cc7f31..959842961 100644
--- a/frontend/app/components/Assist/ChatControls/ChatControls.tsx
+++ b/frontend/app/components/Assist/ChatControls/ChatControls.tsx
@@ -2,7 +2,7 @@ import React, { useState } from 'react'
import stl from './ChatControls.module.css'
import cn from 'classnames'
import { Button, Icon } from 'UI'
-import type { LocalStream } from 'Player/MessageDistributor/managers/LocalStream';
+import type { LocalStream } from 'Player';
interface Props {
diff --git a/frontend/app/components/Assist/ChatWindow/ChatWindow.tsx b/frontend/app/components/Assist/ChatWindow/ChatWindow.tsx
index 35225c5d5..167db8281 100644
--- a/frontend/app/components/Assist/ChatWindow/ChatWindow.tsx
+++ b/frontend/app/components/Assist/ChatWindow/ChatWindow.tsx
@@ -5,7 +5,7 @@ import Counter from 'App/components/shared/SessionItem/Counter';
import stl from './chatWindow.module.css';
import ChatControls from '../ChatControls/ChatControls';
import Draggable from 'react-draggable';
-import type { LocalStream } from 'Player/MessageDistributor/managers/LocalStream';
+import type { LocalStream } from 'Player';
import { toggleVideoLocalStream } from 'Player'
export interface Props {
diff --git a/frontend/app/components/Assist/components/AssistActions/AssistActions.tsx b/frontend/app/components/Assist/components/AssistActions/AssistActions.tsx
index ee5258747..e377cd3ba 100644
--- a/frontend/app/components/Assist/components/AssistActions/AssistActions.tsx
+++ b/frontend/app/components/Assist/components/AssistActions/AssistActions.tsx
@@ -3,7 +3,7 @@ import { Button, Tooltip } from 'UI';
import { connect } from 'react-redux';
import cn from 'classnames';
import { toggleChatWindow } from 'Duck/sessions';
-import { connectPlayer } from 'Player/store';
+import { connectPlayer } from 'Player';
import ChatWindow from '../../ChatWindow';
import {
callPeer,
@@ -16,9 +16,9 @@ import {
CallingState,
ConnectionStatus,
RemoteControlStatus,
-} from 'Player/MessageDistributor/managers/AssistManager';
-import RequestLocalStream from 'Player/MessageDistributor/managers/LocalStream';
-import type { LocalStream } from 'Player/MessageDistributor/managers/LocalStream';
+ RequestLocalStream,
+} from 'Player';
+import type { LocalStream } from 'Player';
import { toast } from 'react-toastify';
import { confirm } from 'UI';
import stl from './AassistActions.module.css';
diff --git a/frontend/app/components/Client/Audit/AuditView/AuditView.tsx b/frontend/app/components/Client/Audit/AuditView/AuditView.tsx
index b93c26d08..df30e64ff 100644
--- a/frontend/app/components/Client/Audit/AuditView/AuditView.tsx
+++ b/frontend/app/components/Client/Audit/AuditView/AuditView.tsx
@@ -46,7 +46,7 @@ function AuditView(props) {
]}
defaultValue={order}
plain
- onChange={({ value }) => auditStore.updateKey('order', value)}
+ onChange={({ value }) => auditStore.updateKey('order', value.value)}
/>
auditStore.updateKey('searchQuery', value) }/>
diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx
index aaecc0b14..3e8a68f11 100644
--- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx
+++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx
@@ -2,6 +2,7 @@ import React from 'react';
import { Icon } from 'UI';
import { checkForRecent } from 'App/date';
import { withSiteId, alertEdit } from 'App/routes';
+import { numberWithCommas } from 'App/utils';
// @ts-ignore
import { DateTime } from 'luxon';
import { withRouter, RouteComponentProps } from 'react-router-dom';
@@ -108,7 +109,7 @@ function AlertListItem(props: Props) {
{' is '}
{alert.query.operator}
- {alert.query.right} {alert.metric.unit}
+ {numberWithCommas(alert.query.right)} {alert.metric.unit}
{' over the past '}
{getThreshold(alert.currentPeriod)}
diff --git a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx
index 0ea012e71..6027646f7 100644
--- a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx
+++ b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx
@@ -122,6 +122,9 @@ const NewAlert = (props: IProps) => {
) {
remove(instance.alertId).then(() => {
props.history.push(withSiteId(alerts(), siteId));
+ toast.success('Alert deleted');
+ }).catch(() => {
+ toast.error('Failed to delete an alert');
});
}
};
@@ -135,6 +138,8 @@ const NewAlert = (props: IProps) => {
} else {
toast.success('Alert updated');
}
+ }).catch(() => {
+ toast.error('Failed to create an alert');
});
};
diff --git a/frontend/app/components/Dashboard/components/DashboardList/DashboardsView.tsx b/frontend/app/components/Dashboard/components/DashboardList/DashboardsView.tsx
index 5341c3487..7378e88f8 100644
--- a/frontend/app/components/Dashboard/components/DashboardList/DashboardsView.tsx
+++ b/frontend/app/components/Dashboard/components/DashboardList/DashboardsView.tsx
@@ -35,7 +35,7 @@ function DashboardsView({ history, siteId }: { history: any, siteId: string }) {
- A dashboard is a custom visualization using your OpenReplay data.
+ A Dashboard is a collection of Metrics that can be shared across teams.
diff --git a/frontend/app/components/Dashboard/components/Funnels/FunnelIssuesDropdown/FunnelIssuesDropdown.tsx b/frontend/app/components/Dashboard/components/Funnels/FunnelIssuesDropdown/FunnelIssuesDropdown.tsx
index 361337443..2dade061d 100644
--- a/frontend/app/components/Dashboard/components/Funnels/FunnelIssuesDropdown/FunnelIssuesDropdown.tsx
+++ b/frontend/app/components/Dashboard/components/Funnels/FunnelIssuesDropdown/FunnelIssuesDropdown.tsx
@@ -49,7 +49,8 @@ function FunnelIssuesDropdown() {
}
}
- const onClickOutside = () => {
+ const onClickOutside = (e: any) => {
+ if (e.target.id === 'dd-button') return;
if (isOpen) {
setTimeout(() => {
setIsOpen(false);
@@ -85,21 +86,23 @@ function FunnelIssuesDropdown() {
IndicatorSeparator: (): any => null,
IndicatorsContainer: (): any => null,
Control: ({ children, ...props }: any) => (
-
-
- { children }
-
-
-
+
+
+ { children }
+
+
+
+
),
Placeholder: (): any => null,
SingleValue: (): any => null,
diff --git a/frontend/app/components/Dashboard/components/MetricsView/MetricsView.tsx b/frontend/app/components/Dashboard/components/MetricsView/MetricsView.tsx
index dd87b2fef..6c39114cd 100644
--- a/frontend/app/components/Dashboard/components/MetricsView/MetricsView.tsx
+++ b/frontend/app/components/Dashboard/components/MetricsView/MetricsView.tsx
@@ -30,7 +30,7 @@ function MetricsView({ siteId }: Props) {
- Create custom Metrics to capture key interactions and track KPIs.
+ Create custom Metrics to capture user frustrations, monitor your app's performance and track other KPIs.