From d028d31cba7a4f36989408ad20e96efc2494d2a5 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 14 Feb 2023 16:02:11 +0100 Subject: [PATCH 001/151] feat(chalice): fixed EXP_SESSIONS_SEARCH ids check --- ee/api/chalicelib/core/sessions_exp.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ee/api/chalicelib/core/sessions_exp.py b/ee/api/chalicelib/core/sessions_exp.py index a4713e992..428e8410a 100644 --- a/ee/api/chalicelib/core/sessions_exp.py +++ b/ee/api/chalicelib/core/sessions_exp.py @@ -202,7 +202,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator): # This function executes the query and return result def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False, - error_status=schemas.ErrorStatus.all, count_only=False, issue=None): + error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False): full_args, query_part = search_query_parts_ch(data=data, error_status=error_status, errors_only=errors_only, favorite_only=data.bookmarked, issue=issue, project_id=project_id, user_id=user_id) @@ -264,6 +264,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_ GROUP BY user_id ) AS users_sessions;""", full_args) + elif ids_only: + main_query = cur.format(f"""SELECT DISTINCT ON(s.session_id) s.session_id + {query_part} + ORDER BY s.session_id desc + LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""", + full_args) else: if data.order is None: data.order = schemas.SortOrderType.desc.value @@ -302,8 +308,8 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_ print(data.json()) print("--------------------") raise err - if errors_only: - return helper.list_to_camel_case(cur.fetchall()) + if errors_only or ids_only: + return helper.list_to_camel_case(sessions) if len(sessions) > 0: sessions = sessions[0] From 734baf2d41c6e3d658d39d03ad38b193a2f2f2e7 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 14 Feb 2023 16:14:25 +0100 Subject: [PATCH 002/151] chore(actions): changing chart --- .github/workflows/crons-ee.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/crons-ee.yaml b/.github/workflows/crons-ee.yaml index 0792edd9a..d56440e67 100644 --- a/.github/workflows/crons-ee.yaml +++ b/.github/workflows/crons-ee.yaml @@ -110,9 +110,9 @@ jobs: cat /tmp/image_override.yaml # Deploy command - mv openreplay/charts/{ingress-nginx,crons,quickwit} /tmp + mv openreplay/charts/{ingress-nginx,utilities,quickwit} /tmp rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,crons,quickwit} openreplay/charts/ + mv /tmp/{ingress-nginx,utilities,quickwit} openreplay/charts/ helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - env: DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} From b7c71d13a46c8f97da1e6338bc49f30641f02b5b Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 14 Feb 2023 16:40:29 +0100 Subject: [PATCH 003/151] feat(chalice): EXP fixes --- api/chalicelib/core/sessions.py | 12 +++++++----- ee/api/chalicelib/core/sessions.py | 12 +++++++----- ee/api/chalicelib/core/sessions_exp.py | 13 +++++++------ 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/api/chalicelib/core/sessions.py b/api/chalicelib/core/sessions.py index 512c05769..c95bed903 100644 --- a/api/chalicelib/core/sessions.py +++ 
b/api/chalicelib/core/sessions.py @@ -301,7 +301,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d extra_col = "" extra_where = "" pre_query = "" - distinct_on="s.session_id" + distinct_on = "s.session_id" if metric_of == schemas.MetricOfTable.user_country: main_col = "user_country" elif metric_of == schemas.MetricOfTable.user_device: @@ -321,7 +321,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d elif metric_of == schemas.MetricOfTable.visited_url: main_col = "path" extra_col = ", path" - distinct_on+=",path" + distinct_on += ",path" main_query = cur.mogrify(f"""{pre_query} SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values FROM (SELECT {main_col} AS name, @@ -1194,8 +1194,9 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): with pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") - return row.get("count", 0) + cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + row = cur.fetchone() + return row.get("count", 0) if row else 0 def session_exists(project_id, session_id): @@ -1203,7 +1204,8 @@ def session_exists(project_id, session_id): query = cur.mogrify("""SELECT 1 FROM public.sessions WHERE session_id=%(session_id)s - AND project_id=%(project_id)s""", + AND project_id=%(project_id)s + LIMIT 1;""", {"project_id": project_id, "session_id": session_id}) cur.execute(query) row = cur.fetchone() diff --git a/ee/api/chalicelib/core/sessions.py b/ee/api/chalicelib/core/sessions.py index bc7613278..6d92c3954 100644 --- a/ee/api/chalicelib/core/sessions.py +++ b/ee/api/chalicelib/core/sessions.py @@ -304,7 +304,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d extra_col = "" extra_where = "" pre_query = "" - distinct_on="s.session_id" + distinct_on = "s.session_id" if metric_of == schemas.MetricOfTable.user_country: main_col = "user_country" elif metric_of == schemas.MetricOfTable.user_device: @@ -324,7 +324,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d elif metric_of == schemas.MetricOfTable.visited_url: main_col = "path" extra_col = ", path" - distinct_on+=",path" + distinct_on += ",path" main_query = cur.mogrify(f"""{pre_query} SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values FROM (SELECT {main_col} AS name, @@ -1197,8 +1197,9 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): with pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") - return row.get("count", 0) + cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + row = cur.fetchone() + return row.get("count", 0) if row else 0 def session_exists(project_id, session_id): @@ -1206,7 +1207,8 @@ def session_exists(project_id, session_id): query = cur.mogrify("""SELECT 1 FROM public.sessions WHERE session_id=%(session_id)s - AND project_id=%(project_id)s""", + AND project_id=%(project_id)s + LIMIT 1;""", {"project_id": project_id, "session_id": session_id}) cur.execute(query) row = cur.fetchone() diff --git a/ee/api/chalicelib/core/sessions_exp.py b/ee/api/chalicelib/core/sessions_exp.py index 428e8410a..35eabad5d 100644 --- a/ee/api/chalicelib/core/sessions_exp.py +++ b/ee/api/chalicelib/core/sessions_exp.py @@ 
-1526,17 +1526,18 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): - with pg_client.PostgresClient(unlimited_query=True) as cur: - row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") + with ch_client.ClickHouseClient() as cur: + row = cur.execute(query=f"SELECT COUNT(session_id) AS count FROM {exp_ch_helper.get_main_sessions_table()}") return row.get("count", 0) def session_exists(project_id, session_id): with ch_client.ClickHouseClient() as cur: - query = cur.format("""SELECT 1 - FROM public.sessions - WHERE session_id=%(session_id)s - AND project_id=%(project_id)s""", + query = cur.format(f"""SELECT 1 + FROM {exp_ch_helper.get_main_sessions_table()} + WHERE session_id=%(session_id)s + AND project_id=%(project_id)s + LIMIT 1""", {"project_id": project_id, "session_id": session_id}) row = cur.execute(query) return row is not None From 81b9a91760f5cf2268c4db5559f52ea12f586cb2 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 12:14:04 +0100 Subject: [PATCH 004/151] feat(chalice): revert to generate_presigned_url for file upload --- api/chalicelib/utils/s3.py | 7 ++----- ee/api/.gitignore | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/api/chalicelib/utils/s3.py b/api/chalicelib/utils/s3.py index c2e5b58c7..366a5d181 100644 --- a/api/chalicelib/utils/s3.py +++ b/api/chalicelib/utils/s3.py @@ -55,7 +55,7 @@ def get_presigned_url_for_sharing(bucket, expires_in, key, check_exists=False): ) -def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args): +def get_presigned_url_for_upload(bucket, expires_in, key, **args): return client.generate_presigned_url( 'put_object', Params={ @@ -66,10 +66,7 @@ def get_presigned_url_for_upload_deprecated(bucket, expires_in, key, **args): ) - - - -def get_presigned_url_for_upload(bucket, expires_in, key, conditions=None, public=False, content_type=None): +def get_presigned_url_for_upload_secure(bucket, expires_in, key, conditions=None, public=False, content_type=None): acl = 'private' if public: acl = 'public-read' diff --git a/ee/api/.gitignore b/ee/api/.gitignore index 5e982fda6..79aec2ade 100644 --- a/ee/api/.gitignore +++ b/ee/api/.gitignore @@ -263,5 +263,6 @@ Pipfile.lock /chalicelib/core/saved_search.py /app_alerts.py /build_alerts.sh +/build_crons.sh /routers/subs/v1_api.py #exp /chalicelib/core/dashboards.py From e110659ee491c74789de39a075f2766206db3aa3 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Tue, 14 Feb 2023 14:48:02 +0100 Subject: [PATCH 005/151] change(ui) - update filters from url instead of applying and fetching --- .../SessionSearchQueryParamHandler.tsx | 8 ++++---- frontend/app/duck/search.js | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/frontend/app/components/shared/SessionSearchQueryParamHandler/SessionSearchQueryParamHandler.tsx b/frontend/app/components/shared/SessionSearchQueryParamHandler/SessionSearchQueryParamHandler.tsx index 820ed4aa0..5bd5d739d 100644 --- a/frontend/app/components/shared/SessionSearchQueryParamHandler/SessionSearchQueryParamHandler.tsx +++ b/frontend/app/components/shared/SessionSearchQueryParamHandler/SessionSearchQueryParamHandler.tsx @@ -2,12 +2,12 @@ import React, { useEffect } from 'react'; import { useHistory } from 'react-router'; import { connect } from 'react-redux'; import { addFilterByKeyAndValue, addFilter } from 'Duck/search'; -import { applyFilter } from 'Duck/search'; +import { updateFilter } from 'Duck/search'; import { createUrlQuery, 
getFiltersFromQuery } from 'App/utils/search'; interface Props { appliedFilter: any; - applyFilter: any; + updateFilter: any; addFilterByKeyAndValue: typeof addFilterByKeyAndValue; addFilter: typeof addFilter; } @@ -17,7 +17,7 @@ const SessionSearchQueryParamHandler = (props: Props) => { const applyFilterFromQuery = () => { const filter = getFiltersFromQuery(history.location.search, appliedFilter); - props.applyFilter(filter, true); + props.updateFilter(filter, true); }; const generateUrlQuery = () => { @@ -35,5 +35,5 @@ export default connect( (state: any) => ({ appliedFilter: state.getIn(['search', 'instance']), }), - { addFilterByKeyAndValue, addFilter, applyFilter } + { addFilterByKeyAndValue, addFilter, updateFilter } )(SessionSearchQueryParamHandler); diff --git a/frontend/app/duck/search.js b/frontend/app/duck/search.js index f4d84ffce..31f028bc3 100644 --- a/frontend/app/duck/search.js +++ b/frontend/app/duck/search.js @@ -243,6 +243,12 @@ export const applyFilter = reduceThenFetchResource((filter, force = false) => ({ force, })); +export const updateFilter = (filter, force = false) => ({ + type: APPLY, + filter, + force, +}); + export const updateCurrentPage = reduceThenFetchResource((page) => ({ type: UPDATE_CURRENT_PAGE, page, From 1e2a5f2644edb1a5b74ac19f6ce964566772ee0e Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 14 Feb 2023 17:24:05 +0100 Subject: [PATCH 006/151] fix(tracker/player): minor fixes for recording behavior --- .../ScreenRecorder/ScreenRecorder.tsx | 55 +++++++++++-------- frontend/app/date.ts | 2 +- frontend/app/utils/screenRecorder.ts | 11 ++-- .../src/ScreenRecordingState.ts | 15 +---- 4 files changed, 43 insertions(+), 40 deletions(-) diff --git a/frontend/app/components/Session_/ScreenRecorder/ScreenRecorder.tsx b/frontend/app/components/Session_/ScreenRecorder/ScreenRecorder.tsx index 9e9aa0ed3..b690669c8 100644 --- a/frontend/app/components/Session_/ScreenRecorder/ScreenRecorder.tsx +++ b/frontend/app/components/Session_/ScreenRecorder/ScreenRecorder.tsx @@ -8,7 +8,7 @@ let stopRecorderCb: () => void; import { recordingsService } from 'App/services'; import { toast } from 'react-toastify'; import { formatTimeOrDate } from 'App/date'; -import { PlayerContext } from 'App/components/Session/playerContext'; +import { PlayerContext, ILivePlayerContext } from 'App/components/Session/playerContext'; import { observer } from 'mobx-react-lite'; /** @@ -24,9 +24,7 @@ function isSupported() { if (agent.includes('edge') || agent.includes('edg/')) return true; // @ts-ignore - if (agent.includes('chrome') && !!window.chrome) return true; - - return false; + return agent.includes('chrome') && !!window.chrome; } const supportedBrowsers = ['Chrome v91+', 'Edge v90+']; @@ -41,8 +39,8 @@ function ScreenRecorder({ sessionId: string; isEnterprise: boolean; }) { - const { player, store } = React.useContext(PlayerContext) - const recordingState = store.get().recordingState + const { player, store } = React.useContext(PlayerContext) as ILivePlayerContext; + const recordingState = store.get().recordingState; const [isRecording, setRecording] = React.useState(false); @@ -54,7 +52,7 @@ function ScreenRecorder({ try { toast.warn('Uploading the recording...'); const { URL, key } = await recordingsService.reserveUrl(siteId, { ...saveObj, sessionId }); - const status = recordingsService.saveFile(URL, blob); + const status = await recordingsService.saveFile(URL, blob); if (status) { await recordingsService.confirmFile(siteId, { ...saveObj, sessionId }, key); @@ -68,38 +66,49 @@ 
function ScreenRecorder({ React.useEffect(() => { if (!isRecording && recordingState === SessionRecordingStatus.Recording) { - startRecording(); + void startRecording(); } if (isRecording && recordingState !== SessionRecordingStatus.Recording) { stopRecordingHandler(); } }, [recordingState, isRecording]); + const onStop = () => { + setRecording(false); + player.assistManager.stopRecording(); + }; + const startRecording = async () => { - const stop = await screenRecorder( - `${formatTimeOrDate(new Date().getTime(), undefined, true)}_${sessionId}`, - sessionId, - onSave - ); - stopRecorderCb = stop; - setRecording(true); + try { + // @ts-ignore + stopRecorderCb = await screenRecorder( + `${formatTimeOrDate(new Date().getTime(), undefined, true)}_${sessionId}`, + sessionId, + onSave, + onStop + ); + setRecording(true); + } catch (e) { + console.error(e); + } }; const stopRecordingHandler = () => { - player.assistManager.stopRecording(); stopRecorderCb?.(); - setRecording(false); + onStop(); }; const recordingRequest = () => { - player.assistManager.requestRecording() + player.assistManager.requestRecording(); }; if (!isSupported() || !isEnterprise) { return (
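+        {/* Fallback branch: the browser cannot capture the screen (isSupported() is false)
+            or the edition is not enterprise, so an explanatory tooltip is shown instead. */}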
{/* @ts-ignore */} - + @@ -121,7 +130,7 @@ function ScreenRecorder({ } export default connect((state: any) => ({ - isEnterprise: state.getIn(['user', 'account', 'edition']) === 'ee', - siteId: state.getIn(['site', 'siteId']), - sessionId: state.getIn(['sessions', 'current']).sessionId, - }))(observer(ScreenRecorder)) + isEnterprise: state.getIn(['user', 'account', 'edition']) === 'ee', + siteId: state.getIn(['site', 'siteId']), + sessionId: state.getIn(['sessions', 'current']).sessionId, +}))(observer(ScreenRecorder)); diff --git a/frontend/app/date.ts b/frontend/app/date.ts index a6162d671..eff9898b4 100644 --- a/frontend/app/date.ts +++ b/frontend/app/date.ts @@ -82,7 +82,7 @@ export function formatDateTimeDefault(timestamp: number): string { * @param {Object} timezone fixed offset like UTC+6 * @returns {String} formatted date (or time if its today) */ -export function formatTimeOrDate(timestamp: number, timezone: Timezone, isFull = false): string { +export function formatTimeOrDate(timestamp: number, timezone?: Timezone, isFull = false): string { var date = DateTime.fromMillis(timestamp) if (timezone) { if (timezone.value === 'UTC') date = date.toUTC(); diff --git a/frontend/app/utils/screenRecorder.ts b/frontend/app/utils/screenRecorder.ts index 705b72af5..a434f5d39 100644 --- a/frontend/app/utils/screenRecorder.ts +++ b/frontend/app/utils/screenRecorder.ts @@ -6,7 +6,8 @@ function createFileRecorder( mimeType: string, recName: string, sessionId: string, - saveCb: Function + saveCb: (saveObj: { name: string; duration: number }, blob: Blob) => void, + onStop: () => void ) { let ended = false; const start = new Date().getTime(); @@ -26,6 +27,7 @@ function createFileRecorder( ended = true; saveFile(recordedChunks, mimeType, start, recName, sessionId, saveCb); + onStop() recordedChunks = []; } @@ -48,7 +50,7 @@ function saveFile( startDate: number, recName: string, sessionId: string, - saveCb: Function + saveCb: (saveObj: { name: string; duration: number }, blob: Blob) => void ) { const saveObject = { name: recName, duration: new Date().getTime() - startDate, sessionId }; @@ -90,14 +92,15 @@ async function recordScreen() { * * @returns a promise that resolves to a function that stops the recording */ -export async function screenRecorder(recName: string, sessionId: string, saveCb: Function) { +export async function screenRecorder(recName: string, sessionId: string, saveCb: (saveObj: { name: string; duration: number }, blob: Blob) => void, onStop: () => void) { try { const stream = await recordScreen(); - const mediaRecorder = createFileRecorder(stream, FILE_TYPE, recName, sessionId, saveCb); + const mediaRecorder = createFileRecorder(stream, FILE_TYPE, recName, sessionId, saveCb, onStop); return () => { if (mediaRecorder.state !== 'inactive') { mediaRecorder.stop(); + onStop() } } } catch (e) { diff --git a/tracker/tracker-assist/src/ScreenRecordingState.ts b/tracker/tracker-assist/src/ScreenRecordingState.ts index 4667590a8..a962e85ac 100644 --- a/tracker/tracker-assist/src/ScreenRecordingState.ts +++ b/tracker/tracker-assist/src/ScreenRecordingState.ts @@ -36,7 +36,7 @@ export default class ScreenRecordingState { private status = RecordingState.Off private recordingAgent: string private overlayAdded = false - private uiComponents: [HTMLDivElement, HTMLDivElement] + private uiComponents: [HTMLDivElement] constructor(private readonly confirmOptions: ConfirmOptions) { } @@ -79,16 +79,6 @@ export default class ScreenRecordingState { private readonly acceptRecording = () => { if 
(!this.overlayAdded) { - const stopButton = window.document.createElement('div') - stopButton.onclick = () => this.rejectRecording() - Object.assign(stopButton.style, buttonStyles) - stopButton.textContent = 'Stop Recording' - stopButton.className = 'or-recording-button' - stopButton.setAttribute('data-openreplay-obscured', '') - stopButton.setAttribute('data-openreplay-hidden', '') - stopButton.setAttribute('data-openreplay-ignore', '') - window.document.body.appendChild(stopButton) - const borderWindow = window.document.createElement('div') Object.assign(borderWindow.style, borderStyles) borderWindow.className = 'or-recording-border' @@ -99,7 +89,7 @@ export default class ScreenRecordingState { this.overlayAdded = true - this.uiComponents = [stopButton, borderWindow,] + this.uiComponents = [borderWindow,] } this.status = RecordingState.Recording } @@ -118,6 +108,7 @@ export default class ScreenRecordingState { this.confirm?.remove() this.status = RecordingState.Off + this.overlayAdded = false this.uiComponents.forEach((el) => el.parentElement?.removeChild(el)) } } From 7e82dce53a3418fb4f2cf7b577dfdcd0bd2a37b6 Mon Sep 17 00:00:00 2001 From: Alex Kaminskii Date: Tue, 14 Feb 2023 17:33:49 +0100 Subject: [PATCH 007/151] feat(tracker): update SetPageLocation referrer value for single-page routing --- tracker/tracker/src/main/modules/viewport.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tracker/tracker/src/main/modules/viewport.ts b/tracker/tracker/src/main/modules/viewport.ts index 9541f085e..16ed279ab 100644 --- a/tracker/tracker/src/main/modules/viewport.ts +++ b/tracker/tracker/src/main/modules/viewport.ts @@ -5,13 +5,15 @@ import { SetPageLocation, SetViewportSize, SetPageVisibility } from '../app/mess export default function (app: App): void { let url: string, width: number, height: number let navigationStart: number + let referrer = document.referrer const sendSetPageLocation = app.safe(() => { const { URL } = document if (URL !== url) { url = URL - app.send(SetPageLocation(url, document.referrer, navigationStart)) + app.send(SetPageLocation(url, referrer, navigationStart)) navigationStart = 0 + referrer = url } }) From 9a6c66f9bf86386d38d119bfb5de7bbda0eb5e2a Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 13:06:21 +0100 Subject: [PATCH 008/151] chore(actions): changing triggers chore(actions): defining new actions --- .github/workflows/alerts-ee.yaml | 212 +++++++++++--------- .github/workflows/alerts.yaml | 199 +++++++++--------- .github/workflows/api-ee.yaml | 11 + .github/workflows/api.yaml | 7 +- .github/workflows/crons-ee.yaml | 13 ++ .github/workflows/peers-ee.yaml | 70 +++++++ .github/workflows/peers.yaml | 69 +++++++ .github/workflows/sourcemaps-reader.yaml | 3 + .github/workflows/utilities-ee.yaml | 70 +++++++ .github/workflows/utilities.yaml | 3 + ee/api/{clean.sh => clean-dev.sh} | 0 ee/sourcemap-reader/Readme.md | 0 ee/utilities/{clean.sh => clean-dev.sh} | 0 sourcemap-reader/{clean.sh => clean-dev.sh} | 0 14 files changed, 461 insertions(+), 196 deletions(-) create mode 100644 .github/workflows/peers-ee.yaml create mode 100644 .github/workflows/peers.yaml create mode 100644 .github/workflows/utilities-ee.yaml rename ee/api/{clean.sh => clean-dev.sh} (100%) delete mode 100644 ee/sourcemap-reader/Readme.md rename ee/utilities/{clean.sh => clean-dev.sh} (100%) rename sourcemap-reader/{clean.sh => clean-dev.sh} (100%) diff --git a/.github/workflows/alerts-ee.yaml b/.github/workflows/alerts-ee.yaml index 1667ff22b..1033fb19f 
100644 --- a/.github/workflows/alerts-ee.yaml +++ b/.github/workflows/alerts-ee.yaml @@ -12,6 +12,20 @@ on: paths: - ee/api/** - api/** + paths-ignore: + - api/.gitignore + - api/routers + - api/app.py + - api/*-dev.sh + - api/requirements.txt + - api/requirements-crons.txt + - ee/api/.gitignore + - ee/api/routers + - ee/api/app.py + - ee/api/*-dev.sh + - ee/api/requirements.txt + - ee/api/requirements-crons.txt + name: Build and Deploy Alerts EE @@ -21,115 +35,115 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 - - name: Docker login - run: | - docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext - # Caching docker images - - uses: satackey/action-docker-layer-caching@v0.0.11 - # Ignore the failure of a step and avoid terminating the job. - continue-on-error: true + # Caching docker images + - uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. + continue-on-error: true - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee - ENVIRONMENT: staging - run: | - skip_security_checks=${{ github.event.inputs.skip_security_checks }} - cd api - PUSH_IMAGE=0 bash -x ./build_alerts.sh ee - [[ "x$skip_security_checks" == "xtrue" ]] || { - curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd api + PUSH_IMAGE=0 bash -x ./build_alerts.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("alerts") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? 
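+            # trivy was invoked with --exit-code 1, so HIGH/CRITICAL findings surface as a
+            # non-zero status here and abort the build through the guard below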
+ [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } images=("alerts") for image in ${images[*]};do - ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We've to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF done - err_code=$? - [[ $err_code -ne 0 ]] && { - exit $err_code - } - } && { - echo "Skipping Security Checks" - } - images=("alerts") - for image in ${images[*]};do - docker push $DOCKER_REPO/$image:$IMAGE_TAG - done - - name: Creating old image input - run: | - # - # Create yaml with existing image tags - # - kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ - tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt - echo > /tmp/image_override.yaml + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that. + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging - for line in `cat /tmp/image_tag.txt`; - do - image_array=($(echo "$line" | tr ':' '\n')) - cat <> /tmp/image_override.yaml - ${image_array[0]}: - image: - # We've to strip off the -ee, as helm will append it. 
- tag: `echo ${image_array[1]} | cut -d '-' -f 1` - EOF - done - - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - - ## Update secerts - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml - sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml - sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml - sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml - sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml - sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml - - # Update changed image tag - sed -i "/alerts/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml - - cat /tmp/image_override.yaml - # Deploy command - mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp - rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ - helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - # We're not passing -ee flag, because helm will add that. - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - - name: Alert slack - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_CHANNEL: ee - SLACK_TITLE: "Failed ${{ github.workflow }}" - SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' - SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} - SLACK_USERNAME: "OR Bot" - SLACK_MESSAGE: 'Build failed :bomb:' + - name: Alert slack + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_CHANNEL: ee + SLACK_TITLE: "Failed ${{ github.workflow }}" + SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' + SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} + SLACK_USERNAME: "OR Bot" + SLACK_MESSAGE: 'Build failed :bomb:' # - name: Debug Job # # if: ${{ failure() }} diff --git a/.github/workflows/alerts.yaml b/.github/workflows/alerts.yaml index 85d25f498..f5e2cbe76 100644 --- a/.github/workflows/alerts.yaml +++ b/.github/workflows/alerts.yaml @@ -11,6 +11,13 @@ on: - api-v1.10.0 paths: - api/** + paths-ignore: + - api/.gitignore + - api/routers + - api/app.py + - api/*-dev.sh + - api/requirements.txt + - api/requirements-crons.txt name: Build and Deploy Alerts @@ -20,112 +27,112 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. 
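+          # a depth of 2 is the minimum that keeps the parent commit available for diffing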
+ fetch-depth: 2 - - name: Docker login - run: | - docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext - # Caching docker images - - uses: satackey/action-docker-layer-caching@v0.0.11 - # Ignore the failure of a step and avoid terminating the job. - continue-on-error: true + # Caching docker images + - uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. + continue-on-error: true - - name: Building and Pushing Alerts image - id: build-image - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - run: | - skip_security_checks=${{ github.event.inputs.skip_security_checks }} - cd api - PUSH_IMAGE=0 bash -x ./build_alerts.sh - [[ "x$skip_security_checks" == "xtrue" ]] || { - curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + - name: Building and Pushing Alerts image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd api + PUSH_IMAGE=0 bash -x ./build_alerts.sh + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("alerts") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } images=("alerts") for image in ${images[*]};do - ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <> /tmp/image_override.yaml + ${image_array[0]}: + image: + tag: ${image_array[1]} + EOF done - err_code=$? 
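+            # same pattern as the EE workflow: a non-zero scan status fails the build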
- [[ $err_code -ne 0 ]] && { - exit $err_code - } - } && { - echo "Skipping Security Checks" - } - images=("alerts") - for image in ${images[*]};do - docker push $DOCKER_REPO/$image:$IMAGE_TAG - done - - name: Creating old image input - run: | - # - # Create yaml with existing image tags - # - kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ - tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt - echo > /tmp/image_override.yaml + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secerts + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f - + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging - for line in `cat /tmp/image_tag.txt`; - do - image_array=($(echo "$line" | tr ':' '\n')) - cat <> /tmp/image_override.yaml - ${image_array[0]}: - image: - tag: ${image_array[1]} - EOF - done - - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - - ## Update secerts - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml - sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml - sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml - sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml - sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml - - # Update changed image tag - sed -i "/alerts/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml - - cat /tmp/image_override.yaml - # Deploy command - mv openreplay/charts/{ingress-nginx,alerts,quickwit} /tmp - rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,alerts,quickwit} openreplay/charts/ - helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f - - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - - name: Alert slack - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - 
SLACK_CHANNEL: foss - SLACK_TITLE: "Failed ${{ github.workflow }}" - SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' - SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} - SLACK_USERNAME: "OR Bot" - SLACK_MESSAGE: 'Build failed :bomb:' + - name: Alert slack + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_CHANNEL: foss + SLACK_TITLE: "Failed ${{ github.workflow }}" + SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff' + SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }} + SLACK_USERNAME: "OR Bot" + SLACK_MESSAGE: 'Build failed :bomb:' # - name: Debug Job # if: ${{ failure() }} diff --git a/.github/workflows/api-ee.yaml b/.github/workflows/api-ee.yaml index 1405f6e81..d62ff77ed 100644 --- a/.github/workflows/api-ee.yaml +++ b/.github/workflows/api-ee.yaml @@ -12,6 +12,17 @@ on: paths: - ee/api/** - api/** + paths-ignore: + - api/.gitignore + - api/app_alerts.py + - api/*-dev.sh + - api/requirements-*.txt + - ee/api/.gitignore + - ee/api/app_alerts.py + - ee/api/app_crons.py + - ee/api/*-dev.sh + - ee/api/requirements-*.txt + name: Build and Deploy Chalice EE diff --git a/.github/workflows/api.yaml b/.github/workflows/api.yaml index 91d6c45a1..ffe7efc4c 100644 --- a/.github/workflows/api.yaml +++ b/.github/workflows/api.yaml @@ -8,9 +8,14 @@ on: default: 'false' push: branches: - - dev + - api-v1.10.0 paths: - api/** + paths-ignore: + - api/.gitignore + - api/app_alerts.py + - api/*-dev.sh + - api/requirements-*.txt name: Build and Deploy Chalice diff --git a/.github/workflows/crons-ee.yaml b/.github/workflows/crons-ee.yaml index d56440e67..9358ebfa4 100644 --- a/.github/workflows/crons-ee.yaml +++ b/.github/workflows/crons-ee.yaml @@ -12,6 +12,19 @@ on: paths: - ee/api/** - api/** + paths-ignore: + - api/.gitignore + - api/app.py + - api/app_alerts.py + - api/*-dev.sh + - api/requirements.txt + - api/requirements-alerts.txt + - ee/api/.gitignore + - ee/api/app.py + - ee/api/app_alerts.py + - ee/api/*-dev.sh + - ee/api/requirements.txt + - ee/api/requirements-crons.txt name: Build and Deploy Crons EE diff --git a/.github/workflows/peers-ee.yaml b/.github/workflows/peers-ee.yaml new file mode 100644 index 000000000..1b2c03bd9 --- /dev/null +++ b/.github/workflows/peers-ee.yaml @@ -0,0 +1,70 @@ +# This action will push the peers changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - ee/peers/** + - peers/** + paths-ignore: + - peers/.gitignore + - peers/*-dev.sh + +name: Build and Deploy Peers + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. 
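+        # an explicit id makes this step's outcome addressable from later steps if needed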
+ id: setcontext + + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + cd peers + PUSH_IMAGE=1 bash build.sh ee + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml + sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml + sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml + sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml + sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml + bash kube-install.sh --app peers + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/peers.yaml b/.github/workflows/peers.yaml new file mode 100644 index 000000000..6de50b023 --- /dev/null +++ b/.github/workflows/peers.yaml @@ -0,0 +1,69 @@ +# This action will push the peers changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - peers/** + paths-ignore: + - peers/.gitignore + - peers/*-dev.sh + +name: Build and Deploy Peers + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. 
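+        # an explicit id makes this step's outcome addressable from later steps if needed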
+ id: setcontext + + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + cd peers + PUSH_IMAGE=1 bash build.sh + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml + sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml + sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml + sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml + sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml + bash kube-install.sh --app peers + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/sourcemaps-reader.yaml b/.github/workflows/sourcemaps-reader.yaml index 2d8aed9c2..f7e27aaa2 100644 --- a/.github/workflows/sourcemaps-reader.yaml +++ b/.github/workflows/sourcemaps-reader.yaml @@ -6,6 +6,9 @@ on: - dev paths: - sourcemap-reader/** + paths-ignore: + - sourcemap-reader/.gitignore + - sourcemap-reader/*-dev.sh name: Build and Deploy sourcemap-reader diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml new file mode 100644 index 000000000..76682a975 --- /dev/null +++ b/.github/workflows/utilities-ee.yaml @@ -0,0 +1,70 @@ +# This action will push the utilities changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - ee/utilities/** + - utilities/*/** + paths-ignore: + - utilities/.gitignore + - utilities/*-dev.sh + +name: Build and Deploy Utilities EE + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. 
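+        # an explicit id makes this step's outcome addressable from later steps if needed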
+ id: setcontext + + - name: Building and Pushing api image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + cd utilities + PUSH_IMAGE=1 bash build.sh ee + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml + sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml + sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml + sed -i "s#kubeconfig.*#kubeconfig_path: ${EE_KUBECONFIG}#g" vars.yaml + sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml + bash kube-install.sh --app utilities + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index 4a4fad5d3..331b33b3a 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -6,6 +6,9 @@ on: - dev paths: - utilities/** + paths-ignore: + - utilities/.gitignore + - utilities/*-dev.sh name: Build and Deploy Utilities diff --git a/ee/api/clean.sh b/ee/api/clean-dev.sh similarity index 100% rename from ee/api/clean.sh rename to ee/api/clean-dev.sh diff --git a/ee/sourcemap-reader/Readme.md b/ee/sourcemap-reader/Readme.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/ee/utilities/clean.sh b/ee/utilities/clean-dev.sh similarity index 100% rename from ee/utilities/clean.sh rename to ee/utilities/clean-dev.sh diff --git a/sourcemap-reader/clean.sh b/sourcemap-reader/clean-dev.sh similarity index 100% rename from sourcemap-reader/clean.sh rename to sourcemap-reader/clean-dev.sh From a767b6f265bf5a56018c9ad49096e53d4869076f Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 13:15:02 +0100 Subject: [PATCH 009/151] chore(actions): fixing paths --- .github/workflows/alerts-ee.yaml | 29 ++++++++++++------------ .github/workflows/alerts.yaml | 15 ++++++------ .github/workflows/api-ee.yaml | 23 +++++++++---------- .github/workflows/api.yaml | 11 ++++----- .github/workflows/crons-ee.yaml | 29 ++++++++++++------------ .github/workflows/peers-ee.yaml | 9 ++++---- .github/workflows/peers.yaml | 7 +++--- .github/workflows/sourcemaps-reader.yaml | 7 +++--- .github/workflows/utilities-ee.yaml | 9 ++++---- .github/workflows/utilities.yaml | 7 +++--- 10 files changed, 68 insertions(+), 78 deletions(-) diff --git a/.github/workflows/alerts-ee.yaml b/.github/workflows/alerts-ee.yaml index 1033fb19f..10482a7cb 100644 --- a/.github/workflows/alerts-ee.yaml +++ b/.github/workflows/alerts-ee.yaml @@ -10,21 +10,20 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** - paths-ignore: - - api/.gitignore - - api/routers - - api/app.py - - api/*-dev.sh - - api/requirements.txt - - api/requirements-crons.txt - - ee/api/.gitignore - - ee/api/routers - - ee/api/app.py - - ee/api/*-dev.sh - - ee/api/requirements.txt - - ee/api/requirements-crons.txt + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/routers" + - "!api/app.py" + - 
"!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-crons.txt" + - "!ee/api/.gitignore" + - "!ee/api/routers" + - "!ee/api/app.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements.txt" + - "!ee/api/requirements-crons.txt" name: Build and Deploy Alerts EE diff --git a/.github/workflows/alerts.yaml b/.github/workflows/alerts.yaml index f5e2cbe76..539cc5e65 100644 --- a/.github/workflows/alerts.yaml +++ b/.github/workflows/alerts.yaml @@ -10,14 +10,13 @@ on: branches: - api-v1.10.0 paths: - - api/** - paths-ignore: - - api/.gitignore - - api/routers - - api/app.py - - api/*-dev.sh - - api/requirements.txt - - api/requirements-crons.txt + - "api/**" + - "!api/.gitignore" + - "!api/routers" + - "!api/app.py" + - "!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-crons.txt" name: Build and Deploy Alerts diff --git a/.github/workflows/api-ee.yaml b/.github/workflows/api-ee.yaml index d62ff77ed..b2a31f276 100644 --- a/.github/workflows/api-ee.yaml +++ b/.github/workflows/api-ee.yaml @@ -10,18 +10,17 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** - paths-ignore: - - api/.gitignore - - api/app_alerts.py - - api/*-dev.sh - - api/requirements-*.txt - - ee/api/.gitignore - - ee/api/app_alerts.py - - ee/api/app_crons.py - - ee/api/*-dev.sh - - ee/api/requirements-*.txt + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements-*.txt" + - "!ee/api/.gitignore" + - "!ee/api/app_alerts.py" + - "!ee/api/app_crons.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements-*.txt" name: Build and Deploy Chalice EE diff --git a/.github/workflows/api.yaml b/.github/workflows/api.yaml index ffe7efc4c..26d59ff87 100644 --- a/.github/workflows/api.yaml +++ b/.github/workflows/api.yaml @@ -10,12 +10,11 @@ on: branches: - api-v1.10.0 paths: - - api/** - paths-ignore: - - api/.gitignore - - api/app_alerts.py - - api/*-dev.sh - - api/requirements-*.txt + - "api/**" + - "!api/.gitignore" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements-*.txt" name: Build and Deploy Chalice diff --git a/.github/workflows/crons-ee.yaml b/.github/workflows/crons-ee.yaml index 9358ebfa4..762dae33e 100644 --- a/.github/workflows/crons-ee.yaml +++ b/.github/workflows/crons-ee.yaml @@ -10,21 +10,20 @@ on: branches: - api-v1.10.0 paths: - - ee/api/** - - api/** - paths-ignore: - - api/.gitignore - - api/app.py - - api/app_alerts.py - - api/*-dev.sh - - api/requirements.txt - - api/requirements-alerts.txt - - ee/api/.gitignore - - ee/api/app.py - - ee/api/app_alerts.py - - ee/api/*-dev.sh - - ee/api/requirements.txt - - ee/api/requirements-crons.txt + - "ee/api/**" + - "api/**" + - "!api/.gitignore" + - "!api/app.py" + - "!api/app_alerts.py" + - "!api/*-dev.sh" + - "!api/requirements.txt" + - "!api/requirements-alerts.txt" + - "!ee/api/.gitignore" + - "!ee/api/app.py" + - "!ee/api/app_alerts.py" + - "!ee/api/*-dev.sh" + - "!ee/api/requirements.txt" + - "!ee/api/requirements-crons.txt" name: Build and Deploy Crons EE diff --git a/.github/workflows/peers-ee.yaml b/.github/workflows/peers-ee.yaml index 1b2c03bd9..5db7436da 100644 --- a/.github/workflows/peers-ee.yaml +++ b/.github/workflows/peers-ee.yaml @@ -5,11 +5,10 @@ on: branches: - dev paths: - - ee/peers/** - - peers/** - paths-ignore: - - peers/.gitignore - - peers/*-dev.sh + - "ee/peers/**" + - "peers/**" + - "!peers/.gitignore" + - "!peers/*-dev.sh" name: Build and Deploy Peers diff --git a/.github/workflows/peers.yaml b/.github/workflows/peers.yaml index 
6de50b023..7b2a715d8 100644 --- a/.github/workflows/peers.yaml +++ b/.github/workflows/peers.yaml @@ -5,10 +5,9 @@ on: branches: - dev paths: - - peers/** - paths-ignore: - - peers/.gitignore - - peers/*-dev.sh + - "peers/**" + - "!peers/.gitignore" + - "!peers/*-dev.sh" name: Build and Deploy Peers diff --git a/.github/workflows/sourcemaps-reader.yaml b/.github/workflows/sourcemaps-reader.yaml index f7e27aaa2..095a70784 100644 --- a/.github/workflows/sourcemaps-reader.yaml +++ b/.github/workflows/sourcemaps-reader.yaml @@ -5,10 +5,9 @@ on: branches: - dev paths: - - sourcemap-reader/** - paths-ignore: - - sourcemap-reader/.gitignore - - sourcemap-reader/*-dev.sh + - "sourcemap-reader/**" + - "!sourcemap-reader/.gitignore" + - "!sourcemap-reader/*-dev.sh" name: Build and Deploy sourcemap-reader diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index 76682a975..a589bfb81 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -5,11 +5,10 @@ on: branches: - dev paths: - - ee/utilities/** - - utilities/*/** - paths-ignore: - - utilities/.gitignore - - utilities/*-dev.sh + - "ee/utilities/**" + - "utilities/*/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" name: Build and Deploy Utilities EE diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index 331b33b3a..b33d86c21 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -5,10 +5,9 @@ on: branches: - dev paths: - - utilities/** - paths-ignore: - - utilities/.gitignore - - utilities/*-dev.sh + - "utilities/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" name: Build and Deploy Utilities From e5ce58438ecc7a4824327ab828a1730a3cd30896 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 16:51:38 +0100 Subject: [PATCH 010/151] feat(chalice): limit exp-search to 7 events for then-events --- ee/api/chalicelib/core/sessions_exp.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ee/api/chalicelib/core/sessions_exp.py b/ee/api/chalicelib/core/sessions_exp.py index 35eabad5d..f60090ed4 100644 --- a/ee/api/chalicelib/core/sessions_exp.py +++ b/ee/api/chalicelib/core/sessions_exp.py @@ -1176,6 +1176,9 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\ """) event_index += 1 + # limit THEN-events to 7 in CH because sequenceMatch cannot take more arguments + if event_index == 7 and data.events_order == schemas.SearchEventOrder._then: + break if event_index < 2: data.events_order = schemas.SearchEventOrder._or From c8eded225f20fdfaa2059129259b933507c373ab Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 15 Feb 2023 17:10:24 +0100 Subject: [PATCH 011/151] fix(tracker): fix screen recording import --- tracker/tracker-assist/src/Assist.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tracker/tracker-assist/src/Assist.ts b/tracker/tracker-assist/src/Assist.ts index 5fe74b2a2..646360521 100644 --- a/tracker/tracker-assist/src/Assist.ts +++ b/tracker/tracker-assist/src/Assist.ts @@ -12,7 +12,7 @@ import AnnotationCanvas from './AnnotationCanvas.js' import ConfirmWindow from './ConfirmWindow/ConfirmWindow.js' import { callConfirmDefault, } from './ConfirmWindow/defaults.js' import type { Options as ConfirmOptions, } from './ConfirmWindow/defaults.js' -import ScreenRecordingState from './ScreenRecordingState' +import 
ScreenRecordingState from './ScreenRecordingState.js' // TODO: fully specified strict check with no-any (everywhere) // @ts-ignore From b7fe40ae4a28737fae7c8d8240511fc200edfd5b Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 15 Feb 2023 17:10:24 +0100 Subject: [PATCH 012/151] fix(tracker): fix screen recording import --- tracker/tracker-assist/src/Assist.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tracker/tracker-assist/src/Assist.ts b/tracker/tracker-assist/src/Assist.ts index 5fe74b2a2..646360521 100644 --- a/tracker/tracker-assist/src/Assist.ts +++ b/tracker/tracker-assist/src/Assist.ts @@ -12,7 +12,7 @@ import AnnotationCanvas from './AnnotationCanvas.js' import ConfirmWindow from './ConfirmWindow/ConfirmWindow.js' import { callConfirmDefault, } from './ConfirmWindow/defaults.js' import type { Options as ConfirmOptions, } from './ConfirmWindow/defaults.js' -import ScreenRecordingState from './ScreenRecordingState' +import ScreenRecordingState from './ScreenRecordingState.js' // TODO: fully specified strict check with no-any (everywhere) // @ts-ignore From b2a5670552414c9affe0e84b858cded193a16d03 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:18:19 +0100 Subject: [PATCH 013/151] feat(assist): changes --- ee/utilities/package-lock.json | 1180 ++++++++++++++++++++++++++++++++ ee/utilities/package.json | 2 +- utilities/package.json | 2 +- 3 files changed, 1182 insertions(+), 2 deletions(-) create mode 100644 ee/utilities/package-lock.json diff --git a/ee/utilities/package-lock.json b/ee/utilities/package-lock.json new file mode 100644 index 000000000..c90edb001 --- /dev/null +++ b/ee/utilities/package-lock.json @@ -0,0 +1,1180 @@ +{ + "name": "utilities-server", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "utilities-server", + "version": "1.0.0", + "license": "Elastic License 2.0 (ELv2)", + "dependencies": { + "@maxmind/geoip2-node": "^3.5.0", + "@socket.io/redis-adapter": "^8.1.0", + "express": "^4.18.2", + "jsonwebtoken": "^9.0.0", + "redis": "^4.6.4", + "socket.io": "^4.6.0", + "ua-parser-js": "^1.0.33", + "uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.19.0" + } + }, + "node_modules/@maxmind/geoip2-node": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/@maxmind/geoip2-node/-/geoip2-node-3.5.0.tgz", + "integrity": "sha512-WG2TNxMwDWDOrljLwyZf5bwiEYubaHuICvQRlgz74lE9OZA/z4o+ZT6OisjDBAZh/yRJVNK6mfHqmP5lLlAwsA==", + "dependencies": { + "camelcase-keys": "^7.0.0", + "ip6addr": "^0.2.5", + "maxmind": "^4.2.0" + } + }, + "node_modules/@redis/bloom": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz", + "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/client": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.5.5.tgz", + "integrity": "sha512-fuMnpDYSjT5JXR9rrCW1YWA4L8N/9/uS4ImT3ZEC/hcaQRI1D/9FvwjriRj1UvepIgzZXthFVKMNRzP/LNL7BQ==", + "dependencies": { + "cluster-key-slot": "1.1.2", + "generic-pool": "3.9.0", + "yallist": "4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@redis/graph": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.0.tgz", + "integrity": "sha512-16yZWngxyXPd+MJxeSr0dqh2AIOi8j9yXKcKCwVaKDbH3HTuETpDVPcLujhFYVPtYrngSco31BUcSa9TH31Gqg==", + 
"peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/json": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.4.tgz", + "integrity": "sha512-LUZE2Gdrhg0Rx7AN+cZkb1e6HjoSKaeeW8rYnt89Tly13GBI5eP4CwDVr+MY8BAYfCg4/N15OUrtLoona9uSgw==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/search": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.1.1.tgz", + "integrity": "sha512-pqCXTc5e7wJJgUuJiC3hBgfoFRoPxYzwn0BEfKgejTM7M/9zP3IpUcqcjgfp8hF+LoV8rHZzcNTz7V+pEIY7LQ==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/time-series": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.4.tgz", + "integrity": "sha512-ThUIgo2U/g7cCuZavucQTQzA9g9JbDDY2f64u3AbAoz/8vE2lt2U37LamDUVChhaDA3IRT9R6VvJwqnUfTJzng==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@socket.io/component-emitter": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", + "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" + }, + "node_modules/@socket.io/redis-adapter": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-8.1.0.tgz", + "integrity": "sha512-8nGMKcQ+DWpgefxA/Pi25aLajVilRPKwu29mZXu5cT+WGVYItcCkfMr4RsMmyYXUyJf00mN+7WinVLihmJwpXA==", + "dependencies": { + "debug": "~4.3.1", + "notepack.io": "~3.0.1", + "uid2": "1.0.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "socket.io-adapter": "^2.4.0" + } + }, + "node_modules/@types/cookie": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz", + "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==" + }, + "node_modules/@types/cors": { + "version": "2.8.13", + "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.13.tgz", + "integrity": "sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "18.13.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", + "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/base64id": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz", + "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==", + "engines": { + "node": "^4.5.0 || >= 5.9" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/camelcase-keys": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-7.0.2.tgz", + "integrity": "sha512-Rjs1H+A9R+Ig+4E/9oyB66UC5Mj9Xq3N//vcLf2WzgdTi/3gUu3Z9KoqmlrEG4VuuLK8wJHofxzdQXz/knhiYg==", + "dependencies": { + "camelcase": "^6.3.0", + "map-obj": "^4.1.0", + "quick-lru": "^5.1.1", + "type-fest": "^1.2.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cluster-key-slot": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz", + "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" + }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": 
"sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/engine.io": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.0.tgz", + "integrity": "sha512-OgxY1c/RuCSeO/rTr8DIFXx76IzUUft86R7/P7MMbbkuzeqJoTNw2lmeD91IyGz41QYleIIjWeMJGgug043sfQ==", + "dependencies": { + "@types/cookie": "^0.4.1", + "@types/cors": "^2.8.12", + "@types/node": ">=10.0.0", + "accepts": "~1.3.4", + "base64id": "2.0.0", + "cookie": "~0.4.1", + "cors": "~2.8.5", + "debug": "~4.3.1", + "engine.io-parser": "~5.0.3", + "ws": "~8.11.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/engine.io-parser": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.6.tgz", + "integrity": "sha512-tjuoZDMAdEhVnSFleYPCtdL2GXwVTGtNjoeJd9IhIG3C1xs9uwxqRNEu5WpnDZCaozwVlK/nuQhpodhXSIMaxw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/engine.io/node_modules/cookie": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", + "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "engines": [ + "node >=0.6.0" + ] + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/generic-pool": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz", + "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", + "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ip6addr": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/ip6addr/-/ip6addr-0.2.5.tgz", + "integrity": "sha512-9RGGSB6Zc9Ox5DpDGFnJdIeF0AsqXzdH+FspCfPPaU/L/4tI6P+5lIoFUFm9JXs9IrJv1boqAaNCQmoDADTSKQ==", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^2.0.2" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" + }, + "node_modules/jsonwebtoken": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz", + "integrity": "sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==", + "dependencies": { + "jws": "^3.2.2", + "lodash": "^4.17.21", + "ms": "^2.1.1", + "semver": "^7.3.8" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsprim": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz", + "integrity": "sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ==", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + } + }, + "node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lru-cache": { + 
"version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/map-obj": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", + "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/maxmind": { + "version": "4.3.8", + "resolved": "https://registry.npmjs.org/maxmind/-/maxmind-4.3.8.tgz", + "integrity": "sha512-HrfxEu5yPBPtTy/OT+W5bPQwEfLUX0EHqe2EbJiB47xQMumHqXvSP7PAwzV8Z++NRCmQwy4moQrTSt0+dH+Jmg==", + "dependencies": { + "mmdb-lib": "2.0.2", + "tiny-lru": "9.0.3" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mmdb-lib": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mmdb-lib/-/mmdb-lib-2.0.2.tgz", + "integrity": "sha512-shi1I+fCPQonhTi7qyb6hr7hi87R7YS69FlfJiMFuJ12+grx0JyL56gLNzGTYXPU7EhAPkMLliGeyHer0K+AVA==", + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + 
"node": ">= 0.6" + } + }, + "node_modules/notepack.io": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-3.0.1.tgz", + "integrity": "sha512-TKC/8zH5pXIAMVQio2TvVDTtPRX+DJPHDqjRbxogtFiByHyzKmy96RA0JtCQJ+WouyyL4A10xomQzgbUT+1jCg==" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + 
"node": ">= 0.8" + } + }, + "node_modules/redis": { + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/redis/-/redis-4.6.4.tgz", + "integrity": "sha512-wi2tgDdQ+Q8q+PR5FLRx4QvDiWaA+PoJbrzsyFqlClN5R4LplHqN3scs/aGjE//mbz++W19SgxiEnQ27jnCRaA==", + "dependencies": { + "@redis/bloom": "1.2.0", + "@redis/client": "1.5.5", + "@redis/graph": "1.1.0", + "@redis/json": "1.0.4", + "@redis/search": "1.1.1", + "@redis/time-series": "1.0.4" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/semver": { + "version": "7.3.8", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", + "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": 
"sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/socket.io": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.0.tgz", + "integrity": "sha512-b65bp6INPk/BMMrIgVvX12x3Q+NqlGqSlTuvKQWt0BUJ3Hyy3JangBl7fEoWZTXbOKlCqNPbQ6MbWgok/km28w==", + "dependencies": { + "accepts": "~1.3.4", + "base64id": "~2.0.0", + "debug": "~4.3.2", + "engine.io": "~6.4.0", + "socket.io-adapter": "~2.5.2", + "socket.io-parser": "~4.2.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/socket.io-adapter": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz", + "integrity": "sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA==", + "dependencies": { + "ws": "~8.11.0" + } + }, + "node_modules/socket.io-parser": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.2.tgz", + "integrity": "sha512-DJtziuKypFkMMHCm2uIshOYC7QaylbtzQwiMYDuCKy3OPkjLzu4B2vAhTlqipRHHzrI0NJeBAizTK7X+6m1jVw==", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.3.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/tiny-lru": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-9.0.3.tgz", + "integrity": "sha512-/i9GruRjXsnDgehxvy6iZ4AFNVxngEFbwzirhdulomMNPGPVV3ECMZOWSw0w4sRMZ9Al9m4jy08GPvRxRUGYlw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ua-parser-js": { + "version": "1.0.33", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.33.tgz", + "integrity": "sha512-RqshF7TPTE0XLYAqmjlu5cLLuGdKrNu9O1KLA/qp39QtbZwuzwv1dT46DZSopoUMsYgXpB3Cv8a03FI8b74oFQ==", + "funding": [ + { + 
"type": "opencollective", + "url": "https://opencollective.com/ua-parser-js" + }, + { + "type": "paypal", + "url": "https://paypal.me/faisalman" + } + ], + "engines": { + "node": "*" + } + }, + "node_modules/uid2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/uid2/-/uid2-1.0.0.tgz", + "integrity": "sha512-+I6aJUv63YAcY9n4mQreLUt0d4lvwkkopDNmpomkAUz0fAkEMV9pRWxN0EjhW1YfRhcuyHg2v3mwddCDW1+LFQ==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uWebSockets.js": { + "version": "20.19.0", + "resolved": "git+ssh://git@github.com/uNetworking/uWebSockets.js.git#42c9c0d5d31f46ca4115dc75672b0037ec970f28" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/ws": { + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", + "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + } + } +} diff --git a/ee/utilities/package.json b/ee/utilities/package.json index 2f61c6a95..3fcedf03b 100644 --- a/ee/utilities/package.json +++ b/ee/utilities/package.json @@ -1,5 +1,5 @@ { - "name": "utilities-server", + "name": "assist-server", "version": "1.0.0", "description": "assist server to get live sessions & sourcemaps reader to get stack trace", "main": "peerjs-server.js", diff --git a/utilities/package.json b/utilities/package.json index 11a467947..b06c8cae5 100644 --- a/utilities/package.json +++ b/utilities/package.json @@ -1,5 +1,5 @@ { - "name": "utilities-server", + "name": "assist-server", "version": "1.0.0", "description": "assist server to get live sessions & sourcemaps reader to get stack trace", "main": "peerjs-server.js", From c7b6122c2c64485dd551b8ab44a2f8dd4ce98244 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:28:43 +0100 Subject: [PATCH 014/151] chore(actions): changing deploy command for assist --- 
.github/workflows/utilities-ee.yaml | 2 +- .github/workflows/utilities.yaml | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index a589bfb81..81341b5d4 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -10,7 +10,7 @@ on: - "!utilities/.gitignore" - "!utilities/*-dev.sh" -name: Build and Deploy Utilities EE +name: Build and Deploy Assist EE jobs: deploy: diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index b33d86c21..aa962bfce 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -9,7 +9,7 @@ on: - "!utilities/.gitignore" - "!utilities/*-dev.sh" -name: Build and Deploy Utilities +name: Build and Deploy Assist jobs: deploy: @@ -52,7 +52,12 @@ jobs: sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities +# bash kube-install.sh --app utilities + # Deploy command + mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - env: DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} From 5beeb149e3982fb5ad02ac04c32b811134a89390 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:29:45 +0100 Subject: [PATCH 015/151] feat(assist): changes --- .github/workflows/utilities.yaml | 2 +- utilities/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index aa962bfce..e862c500c 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -3,7 +3,7 @@ on: workflow_dispatch: push: branches: - - dev + - api-v1.10.0 paths: - "utilities/**" - "!utilities/.gitignore" diff --git a/utilities/Dockerfile b/utilities/Dockerfile index bd47e1c71..8f4d98549 100644 --- a/utilities/Dockerfile +++ b/utilities/Dockerfile @@ -21,4 +21,4 @@ USER 1001 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE ENTRYPOINT ["/sbin/tini", "--"] -CMD npm start +CMD npm start \ No newline at end of file From bc1f148ea7eb5f86fe78a49d69849cbf1bb1b1cf Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:31:03 +0100 Subject: [PATCH 016/151] chore(actions): changing deploy command for assist --- .github/workflows/utilities.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index e862c500c..15f7a5f97 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -52,7 +52,7 @@ jobs: sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml -# bash kube-install.sh --app utilities + # bash kube-install.sh --app utilities # Deploy command mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp rm -rf openreplay/charts/* From fb8db2d3e497966d589cfe63e3d896197251ae31 
Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:31:36 +0100 Subject: [PATCH 017/151] feat(assist): changes --- utilities/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/Dockerfile b/utilities/Dockerfile index 8f4d98549..bd47e1c71 100644 --- a/utilities/Dockerfile +++ b/utilities/Dockerfile @@ -21,4 +21,4 @@ USER 1001 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE ENTRYPOINT ["/sbin/tini", "--"] -CMD npm start \ No newline at end of file +CMD npm start From 02e413d4afea360759226899d32cf01d8082cb31 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 15 Feb 2023 18:39:33 +0100 Subject: [PATCH 018/151] chore(actions): changing deploy command for assist --- .github/workflows/utilities.yaml | 7 +------ utilities/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index 15f7a5f97..82d84cf15 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -52,12 +52,7 @@ jobs: sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - # bash kube-install.sh --app utilities - # Deploy command - mv openreplay/charts/{ingress-nginx,assist,quickwit} /tmp - rm -rf openreplay/charts/* - mv /tmp/{ingress-nginx,assist,quickwit} openreplay/charts/ - helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + bash kube-install.sh --app utilities env: DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} diff --git a/utilities/Dockerfile b/utilities/Dockerfile index bd47e1c71..8f4d98549 100644 --- a/utilities/Dockerfile +++ b/utilities/Dockerfile @@ -21,4 +21,4 @@ USER 1001 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE ENTRYPOINT ["/sbin/tini", "--"] -CMD npm start +CMD npm start \ No newline at end of file From b3427abfbe72630cf7439d0bfa0678c0ccd443e3 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 11:09:32 +0100 Subject: [PATCH 019/151] chore(actions): changing comments --- .github/workflows/utilities-ee.yaml | 2 +- .github/workflows/utilities.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index 81341b5d4..f9a1ac677 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -1,4 +1,4 @@ -# This action will push the utilities changes to aws +# This action will push the assist changes to aws on: workflow_dispatch: push: diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml index 82d84cf15..7d2792d9b 100644 --- a/.github/workflows/utilities.yaml +++ b/.github/workflows/utilities.yaml @@ -1,4 +1,4 @@ -# This action will push the utilities changes to aws +# This action will push the assist changes to aws on: workflow_dispatch: push: From 71c9a82e93816915b42cf145208dbec4ace45956 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 11:31:19 +0100 Subject: [PATCH 020/151] chore(actions): changing paths --- .github/workflows/utilities-ee.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index f9a1ac677..92270dc3d 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -6,6 +6,8 @@ on: - dev paths: - "ee/utilities/**" + - "!ee/utilities/.gitignore" + - "!ee/utilities/*-dev.sh" - "utilities/*/**" - "!utilities/.gitignore" - "!utilities/*-dev.sh" From 553955ccec7d8591375ec207deb800800bbf359a Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 11:31:48 +0100 Subject: [PATCH 021/151] chore(actions): changing branches --- .github/workflows/utilities-ee.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index 92270dc3d..d28f72968 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -3,7 +3,7 @@ on: workflow_dispatch: push: branches: - - dev + - api-v1.10.0 paths: - "ee/utilities/**" - "!ee/utilities/.gitignore" From 804a3cfc6addff3ff2b6dc6a3da8da3ad4bf6af9 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 16 Feb 2023 14:46:03 +0100 Subject: [PATCH 022/151] ci(actions): Adding assist GH actions Signed-off-by: rjshrjndrn --- .github/workflows/assist-ee.yaml | 120 ++++++++++++++++++++++++++++ .github/workflows/assist.yaml | 120 ++++++++++++++++++++++++++++ .github/workflows/utilities-ee.yaml | 71 ---------------- .github/workflows/utilities.yaml | 68 ---------------- 4 files changed, 240 insertions(+), 139 deletions(-) create mode 100644 .github/workflows/assist-ee.yaml create mode 100644 .github/workflows/assist.yaml delete mode 100644 .github/workflows/utilities-ee.yaml delete mode 100644 .github/workflows/utilities.yaml diff --git a/.github/workflows/assist-ee.yaml b/.github/workflows/assist-ee.yaml new file mode 100644 index 000000000..78a783dd1 --- /dev/null +++ b/.github/workflows/assist-ee.yaml @@ -0,0 +1,120 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/utilities/**" + - "utilities/*/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist EE + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? 
+ [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <<EOF >> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We have to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secrets + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # We're not passing the -ee flag, because helm will add that. + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/assist.yaml b/.github/workflows/assist.yaml new file mode 100644 index 000000000..cf4d184cf --- /dev/null +++ b/.github/workflows/assist.yaml @@ -0,0 +1,120 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/utilities/**" + - "utilities/*/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
+ id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <<EOF >> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We have to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secrets + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # FOSS image tags carry no -ee suffix.
+ IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml deleted file mode 100644 index d28f72968..000000000 --- a/.github/workflows/utilities-ee.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# This action will push the assist changes to aws -on: - workflow_dispatch: - push: - branches: - - api-v1.10.0 - paths: - - "ee/utilities/**" - - "!ee/utilities/.gitignore" - - "!ee/utilities/*-dev.sh" - - "utilities/*/**" - - "!utilities/.gitignore" - - "!utilities/*-dev.sh" - -name: Build and Deploy Assist EE - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 - - - name: Docker login - run: | - docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" - - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext - - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee - ENVIRONMENT: staging - run: | - cd utilities - PUSH_IMAGE=1 bash build.sh ee - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml - sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml - sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml - sed -i "s#kubeconfig.*#kubeconfig_path: ${EE_KUBECONFIG}#g" vars.yaml - sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - # - name: Debug Job - # if: ${{ failure() }} - # uses: mxschmitt/action-tmate@v3 - # env: - # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - # IMAGE_TAG: ${{ github.sha }} - # ENVIRONMENT: staging - # diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml deleted file mode 100644 index 7d2792d9b..000000000 --- a/.github/workflows/utilities.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# This action will push the assist changes to aws -on: - workflow_dispatch: - push: - branches: - - api-v1.10.0 - paths: - - "utilities/**" - - "!utilities/.gitignore" - - "!utilities/*-dev.sh" - -name: Build and Deploy Assist - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 - - - name: Docker login - run: | - docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" - - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. 
- id: setcontext - - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - run: | - cd utilities - PUSH_IMAGE=1 bash build.sh - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml - sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml - sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml - sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml - sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - # - name: Debug Job - # if: ${{ failure() }} - # uses: mxschmitt/action-tmate@v3 - # env: - # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - # IMAGE_TAG: ${{ github.sha }} - # ENVIRONMENT: staging - # From 3d31bab0600d3fbf5b2dd6e8b723c94412f59508 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 17:11:25 +0100 Subject: [PATCH 023/151] feat(assist): support missing protocol --- ee/utilities/servers/websocket-cluster.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index 6aa2bade5..77cfd5067 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -24,7 +24,7 @@ const { const {createAdapter} = require("@socket.io/redis-adapter"); const {createClient} = require("redis"); const wsRouter = express.Router(); -const REDIS_URL = process.env.REDIS_URL || "redis://localhost:6379"; +const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://'); const pubClient = createClient({url: REDIS_URL}); const subClient = pubClient.duplicate(); console.log(`Using Redis: ${REDIS_URL}`); From 2f2e84f9f6c354f330a0011bf0899842961e68be Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 16 Feb 2023 16:15:58 +0000 Subject: [PATCH 024/151] chore(helm): Updating minio image Signed-off-by: rjshrjndrn --- .../charts/minio/templates/deployment-standalone.yaml | 8 ++++---- scripts/helmcharts/databases/charts/minio/values.yaml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml b/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml index 23a7232a8..de4af3d90 100755 --- a/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml +++ b/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml @@ -75,20 +75,20 @@ spec: - name: MINIO_FORCE_NEW_KEYS value: {{ ternary "yes" "no" .Values.forceNewKeys | quote }} {{- if .Values.useCredentialsFile }} - - name: MINIO_ACCESS_KEY_FILE + - name: MINIO_ROOT_USER_FILE value: "/opt/bitnami/minio/secrets/access-key" {{- else }} - - name: MINIO_ACCESS_KEY + - name: MINIO_ROOT_USER valueFrom: secretKeyRef: name: {{ include "minio.secretName" . 
}} key: access-key {{- end }} {{- if .Values.useCredentialsFile }} - - name: MINIO_SECRET_KEY_FILE + - name: MINIO_ROOT_PASSWORD_FILE value: "/opt/bitnami/minio/secrets/secret-key" {{- else }} - - name: MINIO_SECRET_KEY + - name: MINIO_ROOT_PASSWORD valueFrom: secretKeyRef: name: {{ include "minio.secretName" . }} diff --git a/scripts/helmcharts/databases/charts/minio/values.yaml b/scripts/helmcharts/databases/charts/minio/values.yaml index 8aee06beb..7062254a7 100755 --- a/scripts/helmcharts/databases/charts/minio/values.yaml +++ b/scripts/helmcharts/databases/charts/minio/values.yaml @@ -21,7 +21,7 @@ global: image: registry: docker.io repository: bitnami/minio - tag: 2020.10.9-debian-10-r6 + tag: 2023.2.10-debian-11-r1 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images From 161a84b86d3ab8b6053aa2ff36fd7c84dce0a929 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 18:25:58 +0100 Subject: [PATCH 025/151] feat(assist): upgrade changes --- ee/utilities/.gitignore | 1 - ee/utilities/run-dev.sh | 6 ++++++ ee/utilities/servers/websocket-cluster.js | 5 +++-- ee/utilities/servers/websocket.js | 2 +- utilities/servers/websocket.js | 2 +- 5 files changed, 11 insertions(+), 5 deletions(-) create mode 100755 ee/utilities/run-dev.sh diff --git a/ee/utilities/.gitignore b/ee/utilities/.gitignore index 8c9dca279..cd68b1ffb 100644 --- a/ee/utilities/.gitignore +++ b/ee/utilities/.gitignore @@ -15,5 +15,4 @@ servers/sourcemaps-server.js /utils/helper.js /utils/assistHelper.js .local -run-dev.sh *.mmdb diff --git a/ee/utilities/run-dev.sh b/ee/utilities/run-dev.sh new file mode 100755 index 000000000..00e8d5a4b --- /dev/null +++ b/ee/utilities/run-dev.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -a +source .env +set +a + +npm start \ No newline at end of file diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index 77cfd5067..fef572a52 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -283,7 +283,7 @@ module.exports = { wsRouter, start: (server, prefix) => { createSocketIOServer(server, prefix); - io.use(async (socket, next) => await authorizer.check(socket, next)); + // io.use(async (socket, next) => await authorizer.check(socket, next)); io.on('connection', async (socket) => { socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err)); debug && console.log(`WS started:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`); @@ -309,7 +309,8 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - await io.of('/').adapter.remoteJoin(socket.id, socket.peerId); + // await io.of('/').adapter.join(socket.id, socket.peerId); + await socket.join(socket.peerId); let rooms = await io.of('/').adapter.allRooms(); if (rooms.has(socket.peerId)) { let connectedSockets = await io.in(socket.peerId).fetchSockets(); diff --git a/ee/utilities/servers/websocket.js b/ee/utilities/servers/websocket.js index bf65789f2..c906b5987 100644 --- a/ee/utilities/servers/websocket.js +++ b/ee/utilities/servers/websocket.js @@ -287,7 +287,7 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - socket.join(socket.peerId); + await socket.join(socket.peerId); if 
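A few hunks up, the MinIO deployment template switches its credential variables: current MinIO images read root credentials from MINIO_ROOT_USER / MINIO_ROOT_PASSWORD (or their *_FILE variants), the older MINIO_ACCESS_KEY / MINIO_SECRET_KEY names having been deprecated upstream. A minimal container env sketch with the new names, assuming a secret named minio-credentials:

    env:
      - name: MINIO_ROOT_USER
        valueFrom:
          secretKeyRef:
            name: minio-credentials
            key: access-key
      - name: MINIO_ROOT_PASSWORD
        valueFrom:
          secretKeyRef:
            name: minio-credentials
            key: secret-key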
(io.sockets.adapter.rooms.get(socket.peerId)) { debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`); } diff --git a/utilities/servers/websocket.js b/utilities/servers/websocket.js index 615390996..f5d029bc2 100644 --- a/utilities/servers/websocket.js +++ b/utilities/servers/websocket.js @@ -268,7 +268,7 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - socket.join(socket.peerId); + await socket.join(socket.peerId); if (io.sockets.adapter.rooms.get(socket.peerId)) { debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`); } From f158596e5e5e0f50c52f5bba69c5fcbb5a6f7ba2 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 11:31:19 +0100 Subject: [PATCH 026/151] chore(actions): changing paths --- .github/workflows/utilities-ee.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index f9a1ac677..92270dc3d 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -6,6 +6,8 @@ on: - dev paths: - "ee/utilities/**" + - "!ee/utilities/.gitignore" + - "!ee/utilities/*-dev.sh" - "utilities/*/**" - "!utilities/.gitignore" - "!utilities/*-dev.sh" From 1721683eb89b8e8ab47e9499a8d2b37bd1fbc487 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 11:31:48 +0100 Subject: [PATCH 027/151] chore(actions): changing branches --- .github/workflows/utilities-ee.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml index 92270dc3d..d28f72968 100644 --- a/.github/workflows/utilities-ee.yaml +++ b/.github/workflows/utilities-ee.yaml @@ -3,7 +3,7 @@ on: workflow_dispatch: push: branches: - - dev + - api-v1.10.0 paths: - "ee/utilities/**" - "!ee/utilities/.gitignore" From 15f4f0e45beac0a3159c84b81c6595237164bf24 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 16 Feb 2023 14:46:03 +0100 Subject: [PATCH 028/151] ci(actions): Adding assist GH actions Signed-off-by: rjshrjndrn --- .github/workflows/assist-ee.yaml | 120 ++++++++++++++++++++++++++++ .github/workflows/assist.yaml | 120 ++++++++++++++++++++++++++++ .github/workflows/utilities-ee.yaml | 71 ---------------- .github/workflows/utilities.yaml | 68 ---------------- 4 files changed, 240 insertions(+), 139 deletions(-) create mode 100644 .github/workflows/assist-ee.yaml create mode 100644 .github/workflows/assist.yaml delete mode 100644 .github/workflows/utilities-ee.yaml delete mode 100644 .github/workflows/utilities.yaml diff --git a/.github/workflows/assist-ee.yaml b/.github/workflows/assist-ee.yaml new file mode 100644 index 000000000..78a783dd1 --- /dev/null +++ b/.github/workflows/assist-ee.yaml @@ -0,0 +1,120 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/utilities/**" + - "utilities/*/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist EE + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. 
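The two path-filter tweaks above lean on how GitHub Actions evaluates paths globs in order, with ! patterns carving exclusions out of earlier matches, so pushes that only touch an ignore file or a local dev script no longer trigger a build. A condensed sketch of the idea:

    on:
      push:
        paths:
          - "utilities/**"            # build on any utilities change...
          - "!utilities/.gitignore"   # ...except the .gitignore
          - "!utilities/*-dev.sh"     # ...and local dev helper scripts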
+ fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <<EOF >> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We have to strip off the -ee, as helm will append it. + tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secrets + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that.
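The build step above fetches a pinned Trivy release and fails the job on unfixed HIGH/CRITICAL vulnerabilities before any image is pushed. A compact sketch of the same gate, folding the push into the scan loop so an image is only published once its scan passes (same flags as above):

    images=("assist")
    for image in "${images[@]}"; do
      # fail fast if the scan finds unfixed HIGH/CRITICAL issues
      ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library \
          --severity "HIGH,CRITICAL" --ignore-unfixed "$DOCKER_REPO/$image:$IMAGE_TAG" || exit 1
      docker push "$DOCKER_REPO/$image:$IMAGE_TAG"
    done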
+ IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/assist.yaml b/.github/workflows/assist.yaml new file mode 100644 index 000000000..cf4d184cf --- /dev/null +++ b/.github/workflows/assist.yaml @@ -0,0 +1,120 @@ +# This action will push the assist changes to aws +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - "ee/utilities/**" + - "utilities/*/**" + - "!utilities/.gitignore" + - "!utilities/*-dev.sh" + +name: Build and Deploy Assist EE + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + # We need to diff with old commit + # to see which workers got changed. + fetch-depth: 2 + + - name: Docker login + run: | + docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" + + - uses: azure/k8s-set-context@v1 + with: + method: kubeconfig + kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. + id: setcontext + + - name: Building and Pushing Assist image + id: build-image + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + ENVIRONMENT: staging + run: | + skip_security_checks=${{ github.event.inputs.skip_security_checks }} + cd utilities + PUSH_IMAGE=0 bash -x ./build.sh ee + [[ "x$skip_security_checks" == "xtrue" ]] || { + curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ + images=("assist") + for image in ${images[*]};do + ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG + done + err_code=$? + [[ $err_code -ne 0 ]] && { + exit $err_code + } + } && { + echo "Skipping Security Checks" + } + images=("assist") + for image in ${images[*]};do + docker push $DOCKER_REPO/$image:$IMAGE_TAG + done + - name: Creating old image input + run: | + # + # Create yaml with existing image tags + # + kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\ + tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt + + echo > /tmp/image_override.yaml + + for line in `cat /tmp/image_tag.txt`; + do + image_array=($(echo "$line" | tr ':' '\n')) + cat <<EOF >> /tmp/image_override.yaml + ${image_array[0]}: + image: + # We have to strip off the -ee, as helm will append it.
+ tag: `echo ${image_array[1]} | cut -d '-' -f 1` + EOF + done + - name: Deploy to kubernetes + run: | + cd scripts/helmcharts/ + + ## Update secrets + sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml + sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml + sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml + sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml + sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml + sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml + sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml + + # Update changed image tag + sed -i "/assist/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml + + cat /tmp/image_override.yaml + # Deploy command + mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp + rm -rf openreplay/charts/* + mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/ + helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f - + env: + DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # We're not passing -ee flag, because helm will add that. + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} + ENVIRONMENT: staging + + # - name: Debug Job + # if: ${{ failure() }} + # uses: mxschmitt/action-tmate@v3 + # env: + # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} + # IMAGE_TAG: ${{ github.sha }} + # ENVIRONMENT: staging + # diff --git a/.github/workflows/utilities-ee.yaml b/.github/workflows/utilities-ee.yaml deleted file mode 100644 index d28f72968..000000000 --- a/.github/workflows/utilities-ee.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# This action will push the assist changes to aws -on: - workflow_dispatch: - push: - branches: - - api-v1.10.0 - paths: - - "ee/utilities/**" - - "!ee/utilities/.gitignore" - - "!ee/utilities/*-dev.sh" - - "utilities/*/**" - - "!utilities/.gitignore" - - "!utilities/*-dev.sh" - -name: Build and Deploy Assist EE - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 - - - name: Docker login - run: | - docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}" - - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
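The deploy step above renders only a chart subset: everything except ingress-nginx, chalice and quickwit is moved out of openreplay/charts before helm template runs, and the rendered manifests are piped straight into kubectl apply with hooks and migrations skipped. The command assumes $k_version is already set by the runner environment; a standalone sketch would resolve it from the cluster first (jq assumed available):

    # derive the kube version instead of relying on a preset $k_version
    k_version=$(kubectl version -o json | jq -r '.serverVersion.gitVersion')
    helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml \
        --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks \
        --kube-version="$k_version" | kubectl apply -f -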
- id: setcontext - - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee - ENVIRONMENT: staging - run: | - cd utilities - PUSH_IMAGE=1 bash build.sh ee - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml - sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml - sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml - sed -i "s#kubeconfig.*#kubeconfig_path: ${EE_KUBECONFIG}#g" vars.yaml - sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities - env: - DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - # - name: Debug Job - # if: ${{ failure() }} - # uses: mxschmitt/action-tmate@v3 - # env: - # DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }} - # IMAGE_TAG: ${{ github.sha }} - # ENVIRONMENT: staging - # diff --git a/.github/workflows/utilities.yaml b/.github/workflows/utilities.yaml deleted file mode 100644 index 7d2792d9b..000000000 --- a/.github/workflows/utilities.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# This action will push the assist changes to aws -on: - workflow_dispatch: - push: - branches: - - api-v1.10.0 - paths: - - "utilities/**" - - "!utilities/.gitignore" - - "!utilities/*-dev.sh" - -name: Build and Deploy Assist - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - # We need to diff with old commit - # to see which workers got changed. - fetch-depth: 2 - - - name: Docker login - run: | - docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}" - - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. 
- id: setcontext - - - name: Building and Pushing api image - id: build-image - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - run: | - cd utilities - PUSH_IMAGE=1 bash build.sh - - name: Deploy to kubernetes - run: | - cd scripts/helmcharts/ - sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml - sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml - sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml - sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml - sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml - sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml - bash kube-install.sh --app utilities - env: - DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} - ENVIRONMENT: staging - - # - name: Debug Job - # if: ${{ failure() }} - # uses: mxschmitt/action-tmate@v3 - # env: - # DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - # IMAGE_TAG: ${{ github.sha }} - # ENVIRONMENT: staging - # From a52adbd4f509ff8bf27cbf3ef683e0099a773aed Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 17:11:25 +0100 Subject: [PATCH 029/151] feat(assist): support missing protocol --- ee/utilities/servers/websocket-cluster.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index 6aa2bade5..77cfd5067 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -24,7 +24,7 @@ const { const {createAdapter} = require("@socket.io/redis-adapter"); const {createClient} = require("redis"); const wsRouter = express.Router(); -const REDIS_URL = process.env.REDIS_URL || "redis://localhost:6379"; +const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://'); const pubClient = createClient({url: REDIS_URL}); const subClient = pubClient.duplicate(); console.log(`Using Redis: ${REDIS_URL}`); From 80007f45aee7dc343ef39d081f1d489f02f5e5ee Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 18:25:58 +0100 Subject: [PATCH 030/151] feat(assist): upgrade changes --- ee/utilities/.gitignore | 1 - ee/utilities/run-dev.sh | 6 ++++++ ee/utilities/servers/websocket-cluster.js | 5 +++-- ee/utilities/servers/websocket.js | 2 +- utilities/servers/websocket.js | 2 +- 5 files changed, 11 insertions(+), 5 deletions(-) create mode 100755 ee/utilities/run-dev.sh diff --git a/ee/utilities/.gitignore b/ee/utilities/.gitignore index 8c9dca279..cd68b1ffb 100644 --- a/ee/utilities/.gitignore +++ b/ee/utilities/.gitignore @@ -15,5 +15,4 @@ servers/sourcemaps-server.js /utils/helper.js /utils/assistHelper.js .local -run-dev.sh *.mmdb diff --git a/ee/utilities/run-dev.sh b/ee/utilities/run-dev.sh new file mode 100755 index 000000000..00e8d5a4b --- /dev/null +++ b/ee/utilities/run-dev.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -a +source .env +set +a + +npm start \ No newline at end of file diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index 77cfd5067..fef572a52 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -283,7 +283,7 @@ module.exports = { wsRouter, start: (server, prefix) => { 
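The REDIS_URL change above (first introduced a few patches earlier) makes the scheme optional: the regex matches an existing scheme:// prefix, a bare leading //, or the empty string at the start of the value, and rewrites whichever it finds to redis://. A small sketch of the behavior, with a hypothetical helper name:

    // Normalize a Redis address so it always carries the redis:// scheme.
    const withRedisScheme = (url) =>
        (url || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');

    withRedisScheme("localhost:6379");      // "redis://localhost:6379"
    withRedisScheme("redis://cache:6379");  // "redis://cache:6379" (idempotent)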
createSocketIOServer(server, prefix); - io.use(async (socket, next) => await authorizer.check(socket, next)); + // io.use(async (socket, next) => await authorizer.check(socket, next)); io.on('connection', async (socket) => { socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err)); debug && console.log(`WS started:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`); @@ -309,7 +309,8 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - await io.of('/').adapter.remoteJoin(socket.id, socket.peerId); + // await io.of('/').adapter.join(socket.id, socket.peerId); + await socket.join(socket.peerId); let rooms = await io.of('/').adapter.allRooms(); if (rooms.has(socket.peerId)) { let connectedSockets = await io.in(socket.peerId).fetchSockets(); diff --git a/ee/utilities/servers/websocket.js b/ee/utilities/servers/websocket.js index bf65789f2..c906b5987 100644 --- a/ee/utilities/servers/websocket.js +++ b/ee/utilities/servers/websocket.js @@ -287,7 +287,7 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - socket.join(socket.peerId); + await socket.join(socket.peerId); if (io.sockets.adapter.rooms.get(socket.peerId)) { debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`); } diff --git a/utilities/servers/websocket.js b/utilities/servers/websocket.js index 615390996..f5d029bc2 100644 --- a/utilities/servers/websocket.js +++ b/utilities/servers/websocket.js @@ -268,7 +268,7 @@ module.exports = { debug && console.log(`notifying new agent about no SESSIONS`); io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS); } - socket.join(socket.peerId); + await socket.join(socket.peerId); if (io.sockets.adapter.rooms.get(socket.peerId)) { debug && console.log(`${socket.id} joined room:${socket.peerId}, as:${socket.identity}, members:${io.sockets.adapter.rooms.get(socket.peerId).size}`); } From ffdb16d8995e43649f410034b644aaf11d4cd48c Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 16 Feb 2023 18:58:32 +0100 Subject: [PATCH 031/151] feat(assist): upgrade changes --- ee/utilities/servers/websocket-cluster.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ee/utilities/servers/websocket-cluster.js b/ee/utilities/servers/websocket-cluster.js index fef572a52..e129bfcb6 100644 --- a/ee/utilities/servers/websocket-cluster.js +++ b/ee/utilities/servers/websocket-cluster.js @@ -283,7 +283,7 @@ module.exports = { wsRouter, start: (server, prefix) => { createSocketIOServer(server, prefix); - // io.use(async (socket, next) => await authorizer.check(socket, next)); + io.use(async (socket, next) => await authorizer.check(socket, next)); io.on('connection', async (socket) => { socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err)); debug && console.log(`WS started:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`); From 6535e5c81dc0d8726b448f0630c6a4422372d0c9 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 12:57:29 +0100 Subject: [PATCH 032/151] fix(ui) - alerts list pagination reset --- .../Dashboard/components/Alerts/AlertsList.tsx | 4 ++-- .../Dashboard/components/Alerts/AlertsView.tsx | 16 +++++++++++++++- frontend/app/mstore/alertsStore.ts | 6 ++++++ 3 files changed, 23 
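The room-join hunks above drop the redis adapter's remoteJoin in favor of a plain socket.join, awaited because joins propagate through the @socket.io/redis-adapter asynchronously in cluster mode. The pattern in isolation (a minimal sketch; peerId is assumed to be set during the handshake):

    io.on('connection', async (socket) => {
        await socket.join(socket.peerId);  // async under the redis adapter
        const room = io.sockets.adapter.rooms.get(socket.peerId);
        console.log(`${socket.id} joined ${socket.peerId}, members: ${room ? room.size : 0}`);
    });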
insertions(+), 3 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx index e4005098e..d1d4c84ef 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx @@ -17,10 +17,10 @@ function AlertsList({ siteId }: Props) { const { alertsStore, settingsStore } = useStore(); const { fetchWebhooks, webhooks } = settingsStore const { alerts: alertsList, alertsSearch, fetchList, init } = alertsStore + const page = alertsStore.page; React.useEffect(() => { fetchList(); fetchWebhooks() }, []); const alertsArray = alertsList - const [page, setPage] = React.useState(1); const filteredAlerts = filterList(alertsArray, alertsSearch, ['name'], (item, query) => query.test(item.query.left)) const list = alertsSearch !== '' ? filteredAlerts : alertsArray; @@ -59,7 +59,7 @@ function AlertsList({ siteId }: Props) { setPage(page)} + onPageChange={(page) => alertsStore.updateKey('page', page)} limit={pageSize} debounceRequest={100} /> diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx index 631df8e43..544c86f8f 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx @@ -1,16 +1,30 @@ -import React from 'react'; +import React, { useEffect } from 'react'; import { Button, PageTitle, Icon, Link } from 'UI'; import withPageTitle from 'HOCs/withPageTitle'; import { withSiteId, alertCreate } from 'App/routes'; import AlertsList from './AlertsList'; import AlertsSearch from './AlertsSearch'; +import { useHistory } from 'react-router'; +import { useStore } from 'App/mstore'; interface IAlertsView { siteId: string; } function AlertsView({ siteId }: IAlertsView) { + const history = useHistory(); + const { alertsStore } = useStore(); + + + useEffect(() => { + const unmount = history.listen((location) => { + if (!location.pathname.includes('/alert')) { + alertsStore.updateKey('page', 1); + } + }); + return unmount; + }, [history]); return (
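The pagination fix above moves the page index out of component state and into the MobX store, so it survives navigating into an alert and back; the history.listen effect then resets it only when the route leaves the alerts section altogether. The reset-on-leave effect in isolation (store shape as in the patch):

    useEffect(() => {
      // history.listen returns an unsubscribe function, which doubles as the cleanup
      const unlisten = history.listen((location) => {
        if (!location.pathname.includes('/alert')) {
          alertsStore.updateKey('page', 1);
        }
      });
      return unlisten;
    }, [history]);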
diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index d377af81e..e608c1873 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -9,6 +9,7 @@ export default class AlertsStore { // @ts-ignore instance: Alert = new Alert({}, false); loading = false + page: number = 1; constructor() { makeAutoObservable(this); @@ -16,6 +17,11 @@ export default class AlertsStore { changeSearch = (value: string) => { this.alertsSearch = value; + this.page = 1; + } + + updateKey(key: string, value: any) { + this[key] = value } fetchList = async () => { From a9659823a705bf26fc3283dc5ea19d15bc0ae385 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 13:04:59 +0100 Subject: [PATCH 033/151] fix(ui) - seriesName in alert list item --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index e3412bdce..78f2aa24f 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -127,7 +127,7 @@ function AlertListItem(props: Props) { {'When the '} {alert.detectionMethod} {' of '} - {alert.seriesName} + {alert.seriesName || alert.query.left} {' is '} {alert.query.operator} From a5bab0a438217ec253be430777b6bf8e36f4f38c Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 13:10:34 +0100 Subject: [PATCH 034/151] feat(alerts): fixed no-events-join builder --- api/app_alerts.py | 7 +++++++ api/chalicelib/core/alerts_processor.py | 19 ++++++++++--------- api/run-alerts-dev.sh | 3 +++ ee/api/chalicelib/core/alerts_processor.py | 16 +++++++++------- .../chalicelib/core/alerts_processor_exp.py | 10 ++++++++-- 5 files changed, 37 insertions(+), 18 deletions(-) create mode 100755 api/run-alerts-dev.sh diff --git a/api/app_alerts.py b/api/app_alerts.py index 7107423de..111bad2a1 100644 --- a/api/app_alerts.py +++ b/api/app_alerts.py @@ -53,3 +53,10 @@ async def stop_server(): await shutdown() import os, signal os.kill(1, signal.SIGTERM) + + +if config("LOCAL_DEV", default=False, cast=bool): + @app.get('/private/trigger', tags=["private"]) + async def trigger_main_cron(): + logging.info("Triggering main cron") + alerts_processor.process() diff --git a/api/chalicelib/core/alerts_processor.py b/api/chalicelib/core/alerts_processor.py index 8049b2f39..4babe64ce 100644 --- a/api/chalicelib/core/alerts_processor.py +++ b/api/chalicelib/core/alerts_processor.py @@ -49,10 +49,12 @@ LeftToDb = { schemas.AlertColumn.errors__4xx_5xx__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", "condition": "status/100!=2"}, - schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=4"}, - schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__4xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: { + "table": "events.resources INNER JOIN public.sessions 
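The LOCAL_DEV guard added to app_alerts.py above registers a private endpoint that runs the alerts cron body on demand, which makes the processor testable without waiting for the scheduler; since the route is only registered when the flag is set, it cannot leak into a normal deployment. The guard, reduced to its essentials:

    from decouple import config

    if config("LOCAL_DEV", default=False, cast=bool):
        @app.get('/private/trigger', tags=["private"])
        async def trigger_main_cron():
            logging.info("Triggering main cron")
            alerts_processor.process()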
USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=5"}, schemas.AlertColumn.errors__javascript__impacted_sessions__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, @@ -95,7 +97,7 @@ def can_check(a) -> bool: a["options"].get("lastNotification") is None or a["options"]["lastNotification"] <= 0 or ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ - and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 def Build(a): @@ -119,7 +121,7 @@ def Build(a): subQ = f"""SELECT {colDef["formula"]} AS value FROM {colDef["table"]} WHERE project_id = %(project_id)s - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" j_s = colDef.get("joinSessions", True) main_table = colDef["table"] is_ss = main_table == "public.sessions" @@ -142,8 +144,7 @@ def Build(a): "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} else: - sub1 = f"""{subQ} AND timestamp>=%(startDate)s - AND timestamp<=%(now)s + sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""} {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}""" params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""} @@ -206,7 +207,7 @@ def process(): cur = cur.recreate(rollback=True) if len(notifications) > 0: cur.execute( - cur.mogrify(f"""UPDATE public.Alerts + cur.mogrify(f"""UPDATE public.alerts SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) if len(notifications) > 0: diff --git a/api/run-alerts-dev.sh b/api/run-alerts-dev.sh new file mode 100755 index 000000000..54db30171 --- /dev/null +++ b/api/run-alerts-dev.sh @@ -0,0 +1,3 @@ +#!/bin/zsh + +uvicorn app_alerts:app --reload \ No newline at end of file diff --git a/ee/api/chalicelib/core/alerts_processor.py b/ee/api/chalicelib/core/alerts_processor.py index 69a0f7f5f..06663336c 100644 --- a/ee/api/chalicelib/core/alerts_processor.py +++ b/ee/api/chalicelib/core/alerts_processor.py @@ -54,10 +54,12 @@ LeftToDb = { schemas.AlertColumn.errors__4xx_5xx__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", "condition": "status/100!=2"}, - schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=4"}, - schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", - "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__4xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": 
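can_check above gates an alert on two clocks: the renotify interval since the last notification, and the repetition base measured from createdAt. The second condition uses modular arithmetic, so an alert is only eligible during the first minute of each repetition window. Distilled into a standalone helper (a sketch; all values in milliseconds):

    def in_current_window(created_at: int, base_minutes: int, now: int) -> bool:
        # True only during the first 60s of every base_minutes-long window
        return (now - created_at) % (base_minutes * 60 * 1000) < 60 * 1000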
"status/100=5"}, schemas.AlertColumn.errors__javascript__impacted_sessions__count: { "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, @@ -100,7 +102,7 @@ def can_check(a) -> bool: a["options"].get("lastNotification") is None or a["options"]["lastNotification"] <= 0 or ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ - and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 def Build(a): @@ -124,7 +126,7 @@ def Build(a): subQ = f"""SELECT {colDef["formula"]} AS value FROM {colDef["table"]} WHERE project_id = %(project_id)s - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" j_s = colDef.get("joinSessions", True) main_table = colDef["table"] is_ss = main_table == "public.sessions" @@ -211,7 +213,7 @@ def process(): cur = cur.recreate(rollback=True) if len(notifications) > 0: cur.execute( - cur.mogrify(f"""UPDATE public.Alerts + cur.mogrify(f"""UPDATE public.alerts SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) if len(notifications) > 0: diff --git a/ee/api/chalicelib/core/alerts_processor_exp.py b/ee/api/chalicelib/core/alerts_processor_exp.py index 7a300654c..0d8b7753c 100644 --- a/ee/api/chalicelib/core/alerts_processor_exp.py +++ b/ee/api/chalicelib/core/alerts_processor_exp.py @@ -135,7 +135,7 @@ def Build(a): FROM {colDef["table"](now)} WHERE project_id = %(project_id)s {"AND event_type=%(event_type)s" if params["event_type"] else ""} - {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + {"AND " + colDef["condition"] if colDef.get("condition") else ""}""" q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid""" @@ -200,7 +200,13 @@ def process(): if alerts_processor.can_check(alert): logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}") query, params = Build(alert) - query = ch_cur.format(query, params) + try: + query = ch_cur.format(query, params) + except Exception as e: + logging.error( + f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}") + logging.error(e) + continue logging.debug(alert) logging.debug(query) try: From 5d94e72da2836abdcde01bab06aefd03851b6333 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 12:57:29 +0100 Subject: [PATCH 035/151] fix(ui) - alerts list pagination reset --- .../Dashboard/components/Alerts/AlertsList.tsx | 4 ++-- .../Dashboard/components/Alerts/AlertsView.tsx | 16 +++++++++++++++- frontend/app/mstore/alertsStore.ts | 6 ++++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx index e4005098e..d1d4c84ef 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsList.tsx @@ -17,10 +17,10 @@ function AlertsList({ siteId }: Props) { const { alertsStore, settingsStore } = useStore(); const { fetchWebhooks, webhooks } = settingsStore const { alerts: alertsList, alertsSearch, 
fetchList, init } = alertsStore + const page = alertsStore.page; React.useEffect(() => { fetchList(); fetchWebhooks() }, []); const alertsArray = alertsList - const [page, setPage] = React.useState(1); const filteredAlerts = filterList(alertsArray, alertsSearch, ['name'], (item, query) => query.test(item.query.left)) const list = alertsSearch !== '' ? filteredAlerts : alertsArray; @@ -59,7 +59,7 @@ function AlertsList({ siteId }: Props) { setPage(page)} + onPageChange={(page) => alertsStore.updateKey('page', page)} limit={pageSize} debounceRequest={100} /> diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx index 631df8e43..544c86f8f 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertsView.tsx @@ -1,16 +1,30 @@ -import React from 'react'; +import React, { useEffect } from 'react'; import { Button, PageTitle, Icon, Link } from 'UI'; import withPageTitle from 'HOCs/withPageTitle'; import { withSiteId, alertCreate } from 'App/routes'; import AlertsList from './AlertsList'; import AlertsSearch from './AlertsSearch'; +import { useHistory } from 'react-router'; +import { useStore } from 'App/mstore'; interface IAlertsView { siteId: string; } function AlertsView({ siteId }: IAlertsView) { + const history = useHistory(); + const { alertsStore } = useStore(); + + + useEffect(() => { + const unmount = history.listen((location) => { + if (!location.pathname.includes('/alert')) { + alertsStore.updateKey('page', 1); + } + }); + return unmount; + }, [history]); return (
diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index d377af81e..e608c1873 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -9,6 +9,7 @@ export default class AlertsStore { // @ts-ignore instance: Alert = new Alert({}, false); loading = false + page: number = 1; constructor() { makeAutoObservable(this); @@ -16,6 +17,11 @@ export default class AlertsStore { changeSearch = (value: string) => { this.alertsSearch = value; + this.page = 1; + } + + updateKey(key: string, value: any) { + this[key] = value } fetchList = async () => { From 1028e2f9482024203920372521dc3655c0a03516 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 13:04:59 +0100 Subject: [PATCH 036/151] fix(ui) - seriesName in alert list item --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index e3412bdce..78f2aa24f 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -127,7 +127,7 @@ function AlertListItem(props: Props) { {'When the '} {alert.detectionMethod} {' of '} - {alert.seriesName} + {alert.seriesName || alert.query.left} {' is '} {alert.query.operator} From b226f2bbb934cf999ccfb9fc8278900404a560e4 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 13:24:41 +0100 Subject: [PATCH 037/151] feat(alerts): changes --- ee/api/chalicelib/core/alerts_processor_exp.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ee/api/chalicelib/core/alerts_processor_exp.py b/ee/api/chalicelib/core/alerts_processor_exp.py index 0d8b7753c..37a1b843f 100644 --- a/ee/api/chalicelib/core/alerts_processor_exp.py +++ b/ee/api/chalicelib/core/alerts_processor_exp.py @@ -198,7 +198,6 @@ def process(): if alert["query"]["left"] != "CUSTOM": continue if alerts_processor.can_check(alert): - logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}") query, params = Build(alert) try: query = ch_cur.format(query, params) From 5fff5cbad5ee8c59ab703358ea575ed174e9839b Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 15:24:13 +0100 Subject: [PATCH 038/151] feat(alerts): changes feat(chalice): changes --- ee/api/chalicelib/core/__init__.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ee/api/chalicelib/core/__init__.py b/ee/api/chalicelib/core/__init__.py index 64529b782..62723d0f1 100644 --- a/ee/api/chalicelib/core/__init__.py +++ b/ee/api/chalicelib/core/__init__.py @@ -6,41 +6,41 @@ logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) from . import sessions as sessions_legacy if config("EXP_SESSIONS_SEARCH", cast=bool, default=False): - print(">>> Using experimental sessions search") + logging.info(">>> Using experimental sessions search") from . import sessions_exp as sessions else: from . import sessions as sessions if config("EXP_AUTOCOMPLETE", cast=bool, default=False): - print(">>> Using experimental autocomplete") + logging.info(">>> Using experimental autocomplete") from . import autocomplete_exp as autocomplete else: from . 
import autocomplete as autocomplete if config("EXP_ERRORS_SEARCH", cast=bool, default=False): - print(">>> Using experimental error search") + logging.info(">>> Using experimental error search") from . import errors as errors_legacy from . import errors_exp as errors if config("EXP_ERRORS_GET", cast=bool, default=False): - print(">>> Using experimental error get") + logging.info(">>> Using experimental error get") else: from . import errors as errors if config("EXP_METRICS", cast=bool, default=False): - print(">>> Using experimental metrics") + logging.info(">>> Using experimental metrics") from . import metrics_exp as metrics else: from . import metrics as metrics if config("EXP_ALERTS", cast=bool, default=False): - print(">>> Using experimental alerts") + logging.info(">>> Using experimental alerts") from . import alerts_processor_exp as alerts_processor else: from . import alerts_processor as alerts_processor if config("EXP_FUNNELS", cast=bool, default=False): - print(">>> Using experimental funnels") + logging.info(">>> Using experimental funnels") if not config("EXP_SESSIONS_SEARCH", cast=bool, default=False): from . import sessions as sessions_legacy @@ -49,4 +49,4 @@ else: from . import significance as significance if config("EXP_RESOURCES", cast=bool, default=False): - print(">>> Using experimental resources for session-replay") + logging.info(">>> Using experimental resources for session-replay") From d5b5b56ba1b02126380700234e50435a3625c0a0 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 15:24:33 +0100 Subject: [PATCH 039/151] change(ui) - show percentage based on trigger option --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index 78f2aa24f..acef2a71c 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -131,7 +131,7 @@ function AlertListItem(props: Props) { {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)} {alert.metric?.unit} + {numberWithCommas(alert.query.right)} {alert.change === 'percent' ? '%' : alert.metric?.unit} {' over the past '} {getThreshold( From a8c2539ffdf881cff7924ac86086a78de0ca4283 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 15:29:07 +0100 Subject: [PATCH 040/151] change(ui) - unit space --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index acef2a71c..024cc734c 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -131,7 +131,7 @@ function AlertListItem(props: Props) { {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)} {alert.change === 'percent' ? '%' : alert.metric?.unit} + {numberWithCommas(alert.query.right)}{alert.change === 'percent' ? 
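The core/__init__.py patch a few hunks above replaces its startup print() calls with logging.info, so the experimental-feature banners honor the service log level. The underlying pattern, an implementation chosen once at import time with the choice logged, in outline:

    import logging
    from decouple import config

    logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))

    if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
        logging.info(">>> Using experimental sessions search")
        from . import sessions_exp as sessions
    else:
        from . import sessions as sessions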
'%' : alert.metric?.unit} {' over the past '} {getThreshold( From 0fdcefe6e9455d0999b35ac4ee0ae29bb5fa7fca Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 15:38:56 +0100 Subject: [PATCH 041/151] chore(actions): changes --- .github/workflows/assist.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/assist.yaml b/.github/workflows/assist.yaml index cf4d184cf..c599d5cbd 100644 --- a/.github/workflows/assist.yaml +++ b/.github/workflows/assist.yaml @@ -5,12 +5,11 @@ on: branches: - dev paths: - - "ee/utilities/**" - "utilities/*/**" - "!utilities/.gitignore" - "!utilities/*-dev.sh" -name: Build and Deploy Assist EE +name: Build and Deploy Assist jobs: deploy: @@ -21,7 +20,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 with: - # We need to diff with old commit + # We need to diff with old commit # to see which workers got changed. fetch-depth: 2 @@ -39,12 +38,12 @@ jobs: id: build-image env: DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }} - IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee + IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }} ENVIRONMENT: staging run: | skip_security_checks=${{ github.event.inputs.skip_security_checks }} cd utilities - PUSH_IMAGE=0 bash -x ./build.sh ee + PUSH_IMAGE=0 bash -x ./build.sh [[ "x$skip_security_checks" == "xtrue" ]] || { curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./ images=("assist") From f8b8db3332ce561706f8399d2acd9b36a210b87f Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 15:55:40 +0100 Subject: [PATCH 042/151] feat(alerts): fixed exp-alerts with legacy-sessions-search --- ee/api/chalicelib/core/alerts_processor_exp.py | 3 ++- ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql | 1 - scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ee/api/chalicelib/core/alerts_processor_exp.py b/ee/api/chalicelib/core/alerts_processor_exp.py index 37a1b843f..310c6faa9 100644 --- a/ee/api/chalicelib/core/alerts_processor_exp.py +++ b/ee/api/chalicelib/core/alerts_processor_exp.py @@ -4,9 +4,10 @@ from decouple import config import schemas from chalicelib.core import alerts_listener, alerts_processor -from chalicelib.core import sessions, alerts +from chalicelib.core import alerts from chalicelib.utils import pg_client, ch_client, exp_ch_helper from chalicelib.utils.TimeUTC import TimeUTC +from chalicelib.core import sessions_exp as sessions logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 30961fc88..2dd8815cc 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -138,7 +138,6 @@ ALTER TABLE IF EXISTS projects ADD COLUMN IF NOT EXISTS beacon_size integer NOT NULL DEFAULT 0; -- To migrate saved search data --- SET client_min_messages TO NOTICE; -- SET client_min_messages TO NOTICE; CREATE OR REPLACE FUNCTION get_new_event_key(key text) diff --git a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 76d6dd88b..8b5ee748f 100644 --- a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -112,7 +112,6 @@ ALTER TABLE IF EXISTS 
projects ADD COLUMN IF NOT EXISTS beacon_size integer NOT NULL DEFAULT 0; -- To migrate saved search data --- SET client_min_messages TO NOTICE; -- SET client_min_messages TO NOTICE; CREATE OR REPLACE FUNCTION get_new_event_key(key text) From cf781d14171fe20b2dc7c5e131f5f1bf7da3dae7 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 16:48:34 +0100 Subject: [PATCH 043/151] feat(chalice): filters-events manual-split --- api/schemas.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/schemas.py b/api/schemas.py index ab057426a..683c05943 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -750,7 +750,8 @@ class SessionsSearchPayloadSchema(_PaginatedSchema): class FlatSessionsSearch(BaseModel): events: Optional[List[_SessionSearchEventSchema]] = Field([]) - filters: List[Union[SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([]) + # filters: List[Union[SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([]) + filters: List[SessionSearchFilterSchema] = Field([]) @root_validator(pre=True) def flat_to_original(cls, values): From c153e321db421a8f2610ee48224953565dcd5522 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 17:09:17 +0100 Subject: [PATCH 044/151] feat(chalice): filters-events un-manual-split --- api/schemas.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api/schemas.py b/api/schemas.py index 683c05943..ab057426a 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -750,8 +750,7 @@ class SessionsSearchPayloadSchema(_PaginatedSchema): class FlatSessionsSearch(BaseModel): events: Optional[List[_SessionSearchEventSchema]] = Field([]) - # filters: List[Union[SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([]) - filters: List[SessionSearchFilterSchema] = Field([]) + filters: List[Union[SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([]) @root_validator(pre=True) def flat_to_original(cls, values): From 8ee5839c1e8f57b9eaef2381223a2fb0c844a70c Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 17 Feb 2023 18:11:27 +0100 Subject: [PATCH 045/151] feat(DB): migrate metric_series to new format --- .../db/init_dbs/postgresql/1.10.0/1.10.0.sql | 101 +++++++++++++++++- .../db/init_dbs/postgresql/1.10.0/1.10.0.sql | 101 +++++++++++++++++- 2 files changed, 200 insertions(+), 2 deletions(-) diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 2dd8815cc..68b115e46 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -139,7 +139,7 @@ ALTER TABLE IF EXISTS projects -- To migrate saved search data --- SET client_min_messages TO NOTICE; +SET client_min_messages TO NOTICE; CREATE OR REPLACE FUNCTION get_new_event_key(key text) RETURNS text AS $$ @@ -325,6 +325,105 @@ $$ $$ LANGUAGE plpgsql; + +-- To migrate saved metric_series data +DO +$$ + DECLARE + row RECORD; + events_att JSONB; + event_filters_att JSONB; + filters_att JSONB; + element JSONB; + s_element JSONB; + new_value TEXT; + new_events JSONB[]; + new_filters JSONB[]; + new_event_filters JSONB[]; + changed BOOLEAN; + planned_update JSONB[]; + BEGIN + planned_update := '{}'::jsonb[]; + FOR row IN SELECT * FROM metric_series + LOOP + -- Transform events attributes + events_att := row.filter -> 'events'; + IF events_att IS NOT NULL THEN + new_events := '{}'::jsonb[]; + FOR element IN SELECT jsonb_array_elements(events_att) + 
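The loop below rewrites each saved filter object via jsonb concatenation, where keys from the right-hand object replace those on the left, so a single key can be swapped while the rest of the object is preserved:

    -- || merges jsonb objects with right-hand precedence:
    SELECT '{"type": "CLICK", "value": ["x"]}'::jsonb
               || jsonb_build_object('type', 'click');
    -- => {"type": "click", "value": ["x"]}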
LOOP + changed := FALSE; + new_value := get_new_event_key(element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + END IF; + -- Transform event's sub-filters attributes + event_filters_att := element -> 'filters'; + new_event_filters := '{}'::jsonb[]; + IF event_filters_att IS NOT NULL AND jsonb_array_length(event_filters_att) > 0 THEN + FOR s_element IN SELECT jsonb_array_elements(event_filters_att) + LOOP + new_value := get_new_event_filter_key(s_element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + s_element := s_element || jsonb_build_object('type', new_value); + new_event_filters := array_append(new_event_filters, s_element); + END IF; + END LOOP; + element := element || jsonb_build_object('filters', new_event_filters); + END IF; + IF changed THEN + new_events := array_append(new_events, element); + END IF; + END LOOP; + IF array_length(new_events, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('events', new_events); + END IF; + END IF; + + -- Transform filters attributes + filters_att := row.filter -> 'filters'; + IF filters_att IS NOT NULL THEN + new_filters := '{}'::jsonb; + FOR element IN SELECT jsonb_array_elements(filters_att) + LOOP + new_value := get_new_filter_key(element ->> 'type'); + if new_value IS NOT NULL THEN + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + new_filters := array_append(new_filters, element); + END IF; + END LOOP; + IF array_length(new_filters, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('filters', new_filters); + END IF; + END IF; + + IF array_length(new_events, 1) > 0 OR array_length(new_filters, 1) > 0 THEN + planned_update := array_append(planned_update, + jsonb_build_object('id', row.series_id, 'change', row.filter)); + END IF; + END LOOP; + + -- Update metric_series + IF array_length(planned_update, 1) > 0 THEN + raise notice 'must update % elements',array_length(planned_update, 1); + + UPDATE metric_series + SET filter=changes.change -> 'change' + FROM (SELECT unnest(planned_update)) AS changes(change) + WHERE series_id = (changes.change -> 'id')::integer; + raise notice 'update done'; + ELSE + raise notice 'nothing to update'; + END IF; + END ; +$$ +LANGUAGE plpgsql; + DROP FUNCTION get_new_filter_key; DROP FUNCTION get_new_event_filter_key; DROP FUNCTION get_new_event_key; diff --git a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 8b5ee748f..b9f0380c4 100644 --- a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -113,7 +113,7 @@ ALTER TABLE IF EXISTS projects -- To migrate saved search data --- SET client_min_messages TO NOTICE; +SET client_min_messages TO NOTICE; CREATE OR REPLACE FUNCTION get_new_event_key(key text) RETURNS text AS $$ @@ -299,6 +299,105 @@ $$ $$ LANGUAGE plpgsql; + +-- To migrate saved metric_series data +DO +$$ + DECLARE + row RECORD; + events_att JSONB; + event_filters_att JSONB; + filters_att JSONB; + element JSONB; + s_element JSONB; + new_value TEXT; + new_events JSONB[]; + new_filters JSONB[]; + new_event_filters JSONB[]; + changed BOOLEAN; + planned_update JSONB[]; + BEGIN + planned_update := '{}'::jsonb[]; + FOR row IN SELECT * FROM metric_series + LOOP + -- Transform events attributes + events_att := 
row.filter -> 'events'; + IF events_att IS NOT NULL THEN + new_events := '{}'::jsonb[]; + FOR element IN SELECT jsonb_array_elements(events_att) + LOOP + changed := FALSE; + new_value := get_new_event_key(element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + END IF; + -- Transform event's sub-filters attributes + event_filters_att := element -> 'filters'; + new_event_filters := '{}'::jsonb[]; + IF event_filters_att IS NOT NULL AND jsonb_array_length(event_filters_att) > 0 THEN + FOR s_element IN SELECT jsonb_array_elements(event_filters_att) + LOOP + new_value := get_new_event_filter_key(s_element ->> 'type'); + if new_value IS NOT NULL THEN + changed := TRUE; + new_value := replace(new_value, '"', ''); + s_element := s_element || jsonb_build_object('type', new_value); + new_event_filters := array_append(new_event_filters, s_element); + END IF; + END LOOP; + element := element || jsonb_build_object('filters', new_event_filters); + END IF; + IF changed THEN + new_events := array_append(new_events, element); + END IF; + END LOOP; + IF array_length(new_events, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('events', new_events); + END IF; + END IF; + + -- Transform filters attributes + filters_att := row.filter -> 'filters'; + IF filters_att IS NOT NULL THEN + new_filters := '{}'::jsonb; + FOR element IN SELECT jsonb_array_elements(filters_att) + LOOP + new_value := get_new_filter_key(element ->> 'type'); + if new_value IS NOT NULL THEN + new_value := replace(new_value, '"', ''); + element := element || jsonb_build_object('type', new_value); + new_filters := array_append(new_filters, element); + END IF; + END LOOP; + IF array_length(new_filters, 1) > 0 THEN + row.filter := row.filter || jsonb_build_object('filters', new_filters); + END IF; + END IF; + + IF array_length(new_events, 1) > 0 OR array_length(new_filters, 1) > 0 THEN + planned_update := array_append(planned_update, + jsonb_build_object('id', row.series_id, 'change', row.filter)); + END IF; + END LOOP; + + -- Update metric_series + IF array_length(planned_update, 1) > 0 THEN + raise notice 'must update % elements',array_length(planned_update, 1); + + UPDATE metric_series + SET filter=changes.change -> 'change' + FROM (SELECT unnest(planned_update)) AS changes(change) + WHERE series_id = (changes.change -> 'id')::integer; + raise notice 'update done'; + ELSE + raise notice 'nothing to update'; + END IF; + END ; +$$ +LANGUAGE plpgsql; + DROP FUNCTION get_new_filter_key; DROP FUNCTION get_new_event_filter_key; DROP FUNCTION get_new_event_key; From 2b0f95cbbb2a6f5cde1042874b7faea51074060e Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 10:35:57 +0100 Subject: [PATCH 046/151] fix(ui) - search filters update --- .../Filters/FilterSource/FilterSource.tsx | 6 ---- .../shared/SessionSearch/SessionSearch.tsx | 28 ++++++++++++++----- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index eed1e6e1d..08c93d8df 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -1,7 +1,6 @@ import { FilterType } from 'App/types/filter/filterType'; import React, { useState, useEffect } from 'react'; import stl from './FilterSource.module.css'; 
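// Note on the removals just below: this patch takes the per-keystroke
// debounce out of the source input (previously every change re-ran
// props.onUpdate about a second later) and centralises fetching in
// SessionSearch through a module-scoped `let debounceFetch` assigned inside a
// mount effect. A hypothetical instance-local alternative (a sketch, not what
// the patch does) would be:
//   const debounceFetch = React.useRef(debounce(() => props.fetchSessions(), 500)).current;
// which keeps one debounced function per mounted component instead of sharing
// a single mutable module-level binding across instances.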
-import { debounce } from 'App/utils'; import cn from 'classnames'; interface Props { @@ -11,16 +10,11 @@ interface Props { function FilterSource(props: Props) { const { filter } = props; const [value, setValue] = useState(filter.source[0] || ''); - const debounceUpdate: any = React.useCallback(debounce(props.onUpdate, 1000), [props.onUpdate]); useEffect(() => { setValue(filter.source[0] || ''); }, [filter]); - useEffect(() => { - debounceUpdate({ ...filter, source: [value] }); - }, [value]); - const write = ({ target: { value, name } }: any) => setValue(value); const renderFiled = () => { diff --git a/frontend/app/components/shared/SessionSearch/SessionSearch.tsx b/frontend/app/components/shared/SessionSearch/SessionSearch.tsx index 48856d929..84fb770a8 100644 --- a/frontend/app/components/shared/SessionSearch/SessionSearch.tsx +++ b/frontend/app/components/shared/SessionSearch/SessionSearch.tsx @@ -1,24 +1,32 @@ -import React from 'react'; +import React, { useEffect } from 'react'; import FilterList from 'Shared/Filters/FilterList'; import FilterSelection from 'Shared/Filters/FilterSelection'; import SaveFilterButton from 'Shared/SaveFilterButton'; import { connect } from 'react-redux'; import { Button } from 'UI'; -import { edit, addFilter } from 'Duck/search'; +import { edit, addFilter, fetchSessions, updateFilter } from 'Duck/search'; import SessionSearchQueryParamHandler from 'Shared/SessionSearchQueryParamHandler'; +import { debounce } from 'App/utils'; + +let debounceFetch: any = () => {} + interface Props { appliedFilter: any; edit: typeof edit; addFilter: typeof addFilter; saveRequestPayloads: boolean; metaLoading?: boolean + fetchSessions: typeof fetchSessions; + updateFilter: typeof updateFilter; } function SessionSearch(props: Props) { const { appliedFilter, saveRequestPayloads = false, metaLoading } = props; const hasEvents = appliedFilter.filters.filter((i: any) => i.isEvent).size > 0; const hasFilters = appliedFilter.filters.filter((i: any) => !i.isEvent).size > 0; - + useEffect(() => { + debounceFetch = debounce(() => props.fetchSessions(), 500); + }, []) const onAddFilter = (filter: any) => { props.addFilter(filter); @@ -33,10 +41,12 @@ function SessionSearch(props: Props) { } }); - props.edit({ + props.updateFilter({ ...appliedFilter, filters: newFilters, }); + + debounceFetch() }; const onRemoveFilter = (filterIndex: any) => { @@ -44,15 +54,19 @@ function SessionSearch(props: Props) { return i !== filterIndex; }); - props.edit({ + props.updateFilter({ filters: newFilters, }); + + debounceFetch() }; const onChangeEventsOrder = (e: any, { value }: any) => { - props.edit({ + props.updateFilter({ eventsOrder: value, }); + + debounceFetch() }; return !metaLoading && ( @@ -102,5 +116,5 @@ export default connect( appliedFilter: state.getIn(['search', 'instance']), metaLoading: state.getIn(['customFields', 'fetchRequestActive', 'loading']) }), - { edit, addFilter } + { edit, addFilter, fetchSessions, updateFilter } )(SessionSearch); From 1518fbf594b125bd0a91a115071a078de6801c10 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 15:24:33 +0100 Subject: [PATCH 047/151] change(ui) - show percentage based on trigger option --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index 78f2aa24f..acef2a71c 100644 --- 
a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -131,7 +131,7 @@ function AlertListItem(props: Props) { {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)} {alert.metric?.unit} + {numberWithCommas(alert.query.right)} {alert.change === 'percent' ? '%' : alert.metric?.unit} {' over the past '} {getThreshold( From 2953282b21fbb11687dffaa07fb3305ba57ab91f Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Fri, 17 Feb 2023 15:29:07 +0100 Subject: [PATCH 048/151] change(ui) - unit space --- .../components/Dashboard/components/Alerts/AlertListItem.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index acef2a71c..024cc734c 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -131,7 +131,7 @@ function AlertListItem(props: Props) { {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)} {alert.change === 'percent' ? '%' : alert.metric?.unit} + {numberWithCommas(alert.query.right)}{alert.change === 'percent' ? '%' : alert.metric?.unit} {' over the past '} {getThreshold( From 6302ff4df4df02600534fcf68b6e86093373ce87 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 10:35:57 +0100 Subject: [PATCH 049/151] fix(ui) - search filters update --- .../Filters/FilterSource/FilterSource.tsx | 6 ---- .../shared/SessionSearch/SessionSearch.tsx | 28 ++++++++++++++----- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index eed1e6e1d..08c93d8df 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -1,7 +1,6 @@ import { FilterType } from 'App/types/filter/filterType'; import React, { useState, useEffect } from 'react'; import stl from './FilterSource.module.css'; -import { debounce } from 'App/utils'; import cn from 'classnames'; interface Props { @@ -11,16 +10,11 @@ interface Props { function FilterSource(props: Props) { const { filter } = props; const [value, setValue] = useState(filter.source[0] || ''); - const debounceUpdate: any = React.useCallback(debounce(props.onUpdate, 1000), [props.onUpdate]); useEffect(() => { setValue(filter.source[0] || ''); }, [filter]); - useEffect(() => { - debounceUpdate({ ...filter, source: [value] }); - }, [value]); - const write = ({ target: { value, name } }: any) => setValue(value); const renderFiled = () => { diff --git a/frontend/app/components/shared/SessionSearch/SessionSearch.tsx b/frontend/app/components/shared/SessionSearch/SessionSearch.tsx index 48856d929..84fb770a8 100644 --- a/frontend/app/components/shared/SessionSearch/SessionSearch.tsx +++ b/frontend/app/components/shared/SessionSearch/SessionSearch.tsx @@ -1,24 +1,32 @@ -import React from 'react'; +import React, { useEffect } from 'react'; import FilterList from 'Shared/Filters/FilterList'; import FilterSelection from 'Shared/Filters/FilterSelection'; import SaveFilterButton from 'Shared/SaveFilterButton'; import { connect } from 'react-redux'; import { Button } from 'UI'; -import { edit, addFilter } from 'Duck/search'; +import { edit, 
addFilter, fetchSessions, updateFilter } from 'Duck/search'; import SessionSearchQueryParamHandler from 'Shared/SessionSearchQueryParamHandler'; +import { debounce } from 'App/utils'; + +let debounceFetch: any = () => {} + interface Props { appliedFilter: any; edit: typeof edit; addFilter: typeof addFilter; saveRequestPayloads: boolean; metaLoading?: boolean + fetchSessions: typeof fetchSessions; + updateFilter: typeof updateFilter; } function SessionSearch(props: Props) { const { appliedFilter, saveRequestPayloads = false, metaLoading } = props; const hasEvents = appliedFilter.filters.filter((i: any) => i.isEvent).size > 0; const hasFilters = appliedFilter.filters.filter((i: any) => !i.isEvent).size > 0; - + useEffect(() => { + debounceFetch = debounce(() => props.fetchSessions(), 500); + }, []) const onAddFilter = (filter: any) => { props.addFilter(filter); @@ -33,10 +41,12 @@ function SessionSearch(props: Props) { } }); - props.edit({ + props.updateFilter({ ...appliedFilter, filters: newFilters, }); + + debounceFetch() }; const onRemoveFilter = (filterIndex: any) => { @@ -44,15 +54,19 @@ function SessionSearch(props: Props) { return i !== filterIndex; }); - props.edit({ + props.updateFilter({ filters: newFilters, }); + + debounceFetch() }; const onChangeEventsOrder = (e: any, { value }: any) => { - props.edit({ + props.updateFilter({ eventsOrder: value, }); + + debounceFetch() }; return !metaLoading && ( @@ -102,5 +116,5 @@ export default connect( appliedFilter: state.getIn(['search', 'instance']), metaLoading: state.getIn(['customFields', 'fetchRequestActive', 'loading']) }), - { edit, addFilter } + { edit, addFilter, fetchSessions, updateFilter } )(SessionSearch); From 76f971237f899879714e7b1c63ddf692a6d46ba2 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Mon, 20 Feb 2023 11:09:50 +0100 Subject: [PATCH 050/151] feat(alerts): fixed no join constraint --- ee/api/chalicelib/core/alerts_processor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ee/api/chalicelib/core/alerts_processor.py b/ee/api/chalicelib/core/alerts_processor.py index 06663336c..17e4d275f 100644 --- a/ee/api/chalicelib/core/alerts_processor.py +++ b/ee/api/chalicelib/core/alerts_processor.py @@ -149,8 +149,7 @@ def Build(a): "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} else: - sub1 = f"""{subQ} AND timestamp>=%(startDate)s - AND timestamp<=%(now)s + sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""} {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}""" params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""} From 1b675b8e400858261aad81840b9e7f3074eca233 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 11:24:57 +0100 Subject: [PATCH 051/151] fix(ui) - search filters update --- frontend/app/utils/search.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index 82b3daee1..017a5a7f6 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -80,7 +80,7 @@ const getFiltersFromEntries = (entires: any) => { filter.value = valueArr; filter.operator = operator; - filter.source = sourceArr; + filter.source = sourceArr && sourceArr.length > 0 ? 
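// (the rewritten assignment below normalises the source list parsed from the
// URL: an empty array becomes null, so downstream code treats the source as
// unset; that is a reading of the change, since the commit message only says
// "search filters update")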
sourceArr : null; filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; if (!filter.filters || filter.filters.size === 0) { filters.push(filter); From 9a8d43a323968b52ceea2a48e72d5f993a8e2c29 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 11:24:57 +0100 Subject: [PATCH 052/151] fix(ui) - search filters update --- frontend/app/utils/search.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index 82b3daee1..017a5a7f6 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -80,7 +80,7 @@ const getFiltersFromEntries = (entires: any) => { filter.value = valueArr; filter.operator = operator; - filter.source = sourceArr; + filter.source = sourceArr && sourceArr.length > 0 ? sourceArr : null; filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; if (!filter.filters || filter.filters.size === 0) { filters.push(filter); From d7c2052ac0da038dcb3afabb9c8e59ca95208de5 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Mon, 20 Feb 2023 14:43:01 +0100 Subject: [PATCH 053/151] feat(DB): changes --- ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql | 2 +- scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 68b115e46..c6060201a 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -326,7 +326,7 @@ $$ LANGUAGE plpgsql; --- To migrate saved metric_series data +-- To migrate metric_series data DO $$ DECLARE diff --git a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index b9f0380c4..a490d0943 100644 --- a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -300,7 +300,7 @@ $$ LANGUAGE plpgsql; --- To migrate saved metric_series data +-- To migrate metric_series data DO $$ DECLARE From 68c9f30200bef762462b839b93911df3ae5a6c3a Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Mon, 20 Feb 2023 15:30:45 +0100 Subject: [PATCH 054/151] fix(ui): keep share message after sharing --- frontend/app/components/shared/SharePopup/SharePopup.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/app/components/shared/SharePopup/SharePopup.js b/frontend/app/components/shared/SharePopup/SharePopup.js index 984ce0060..07726d14a 100644 --- a/frontend/app/components/shared/SharePopup/SharePopup.js +++ b/frontend/app/components/shared/SharePopup/SharePopup.js @@ -80,8 +80,8 @@ export default class SharePopup extends React.PureComponent { handleSuccess = (endpoint) => { const obj = endpoint === 'Slack' - ? { isOpen: false, comment: '', loadingSlack: false } - : { isOpen: false, comment: '', loadingTeams: false }; + ? { loadingSlack: false } + : { loadingTeams: false }; this.setState(obj); toast.success(`Sent to ${endpoint}.`); }; @@ -109,7 +109,7 @@ export default class SharePopup extends React.PureComponent { return ( this.setState({ isOpen: true })} - onClose={() => this.setState({ isOpen: false })} + onClose={() => this.setState({ isOpen: false, comment: '' })} render={() => (
{this.state.loadingTeams || this.state.loadingSlack ? ( From 2fae4549d665ff5d04642b2f8e24e70d57baa7a2 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 16:37:19 +0100 Subject: [PATCH 055/151] fix(ui) - cards list filter by dashboard --- .../components/Dashboard/components/MetricsList/MetricsList.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx b/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx index f2639d37f..1b9f7dfc9 100644 --- a/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx +++ b/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx @@ -21,7 +21,7 @@ function MetricsList({ const dashboard = dashboardStore.selectedDashboard; const existingCardIds = useMemo(() => dashboard?.widgets?.map(i => parseInt(i.metricId)), [dashboard]); - const cards = useMemo(() => metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))), [metricStore.filteredCards]); + const cards = useMemo(() => !!onSelectionChange ? metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))) : metricStore.filteredCards, [metricStore.filteredCards]); useEffect(() => { metricStore.fetchList(); From fdd28dbc4ae9e9b59a591afad5577679b3113e57 Mon Sep 17 00:00:00 2001 From: Alexander Date: Mon, 20 Feb 2023 16:37:55 +0100 Subject: [PATCH 056/151] Draft: New metrics module (#982) * feat(backend): created new metrics module --- backend/cmd/assets/main.go | 19 +- backend/cmd/db/main.go | 13 +- backend/cmd/ender/main.go | 17 +- backend/cmd/http/main.go | 23 ++- backend/cmd/integrations/main.go | 18 +- backend/cmd/sink/main.go | 36 ++-- backend/cmd/storage/main.go | 15 +- backend/internal/assets/cacher/cacher.go | 47 ++--- backend/internal/http/router/handlers-ios.go | 37 ++-- backend/internal/http/router/handlers-web.go | 65 +++---- backend/internal/http/router/handlers.go | 6 +- backend/internal/http/router/response.go | 33 +++- backend/internal/http/router/router.go | 44 +---- backend/internal/sessionender/ender.go | 45 ++--- backend/internal/sink/assetscache/assets.go | 82 +++----- backend/internal/storage/storage.go | 143 ++++---------- backend/pkg/db/postgres/batches.go | 65 ++----- backend/pkg/db/postgres/bulk.go | 55 ++---- backend/pkg/db/postgres/bulks.go | 33 ++-- backend/pkg/db/postgres/connector.go | 49 +---- backend/pkg/db/postgres/pool.go | 73 +++----- backend/pkg/messages/iterator-sink.go | 3 + backend/pkg/messages/iterator.go | 5 +- backend/pkg/metrics/assets/metrics.go | 72 ++++++++ backend/pkg/metrics/common/metrics.go | 11 ++ backend/pkg/metrics/database/metrics.go | 127 +++++++++++++ backend/pkg/metrics/ender/metrics.go | 51 +++++ backend/pkg/metrics/http/metrics.go | 55 ++++++ backend/pkg/metrics/server.go | 40 ++++ backend/pkg/metrics/sink/metrics.go | 185 +++++++++++++++++++ backend/pkg/metrics/storage/metrics.go | 114 ++++++++++++ ee/backend/pkg/db/clickhouse/bulk.go | 16 +- ee/backend/pkg/db/clickhouse/connector.go | 26 +-- 33 files changed, 1021 insertions(+), 602 deletions(-) create mode 100644 backend/pkg/metrics/assets/metrics.go create mode 100644 backend/pkg/metrics/common/metrics.go create mode 100644 backend/pkg/metrics/database/metrics.go create mode 100644 backend/pkg/metrics/ender/metrics.go create mode 100644 backend/pkg/metrics/http/metrics.go create mode 100644 backend/pkg/metrics/server.go create mode 100644 backend/pkg/metrics/sink/metrics.go create mode 100644 
backend/pkg/metrics/storage/metrics.go diff --git a/backend/cmd/assets/main.go b/backend/cmd/assets/main.go index b41dedd87..b05ecbe52 100644 --- a/backend/cmd/assets/main.go +++ b/backend/cmd/assets/main.go @@ -1,9 +1,7 @@ package main import ( - "context" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -13,12 +11,16 @@ import ( "openreplay/backend/internal/assets/cacher" config "openreplay/backend/internal/config/assets" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + assetsMetrics "openreplay/backend/pkg/metrics/assets" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("assets") + m := metrics.New() + m.Register(assetsMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,18 +28,13 @@ func main() { pprof.StartProfilingServer() } - cacher := cacher.NewCacher(cfg, metrics) - - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } + cacher := cacher.NewCacher(cfg) msgHandler := func(msg messages.Message) { switch m := msg.(type) { case *messages.AssetCache: cacher.CacheURL(m.SessionID(), m.URL) - totalAssets.Add(context.Background(), 1) + assetsMetrics.IncreaseProcessesSessions() // TODO: connect to "raw" topic in order to listen for JSException case *messages.JSException: sourceList, err := assets.ExtractJSExceptionSources(&m.Payload) diff --git a/backend/cmd/db/main.go b/backend/cmd/db/main.go index f9440a908..84b0d81ed 100644 --- a/backend/cmd/db/main.go +++ b/backend/cmd/db/main.go @@ -3,8 +3,6 @@ package main import ( "errors" "log" - types2 "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -14,16 +12,21 @@ import ( "openreplay/backend/internal/db/datasaver" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + types2 "openreplay/backend/pkg/db/types" "openreplay/backend/pkg/handlers" custom2 "openreplay/backend/pkg/handlers/custom" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/sessions" ) func main() { - metrics := monitoring.New("db") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := db.New() @@ -33,7 +36,7 @@ func main() { // Init database pg := cache.NewPGCache( - postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs) + postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs) defer pg.Close() // HandlersFabric returns the list of message handlers we want to be applied to each incoming message. 
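The bodies of the new pkg/metrics files are added by this patch but fall outside the hunks quoted here, so the following is only a sketch inferred from the call sites above (metrics.New, m.Register(someMetrics.List()), and the per-package Increase*/Record* helpers). The Prometheus client is an assumption suggested by the Register/List pairing and the new server.go; the real module may differ.

    // hypothetical sketch of backend/pkg/metrics (two separate files shown back to back)
    package metrics

    import (
        "log"

        "github.com/prometheus/client_golang/prometheus"
    )

    type Metrics struct{ registry *prometheus.Registry }

    func New() *Metrics { return &Metrics{registry: prometheus.NewRegistry()} }

    // Register adds one service's collectors, e.g. m.Register(databaseMetrics.List()).
    func (m *Metrics) Register(cs []prometheus.Collector) {
        for _, c := range cs {
            if err := m.registry.Register(c); err != nil {
                log.Printf("can't register collector: %s", err)
            }
        }
    }

    // hypothetical sketch of a per-service package such as backend/pkg/metrics/assets
    package assets

    import "github.com/prometheus/client_golang/prometheus"

    var processedSessions = prometheus.NewCounter(
        prometheus.CounterOpts{Name: "assets_processed_sessions_total"},
    )

    func IncreaseProcessesSessions() { processedSessions.Inc() }

    func List() []prometheus.Collector { return []prometheus.Collector{processedSessions} }

Whichever client actually backs it, the visible design change is consistent across these diffs: services stop receiving a metrics handle through dependency injection, each package owns its instruments, and the fallible per-call RegisterCounter/RegisterHistogram boilerplate on the "-" sides disappears.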
diff --git a/backend/cmd/ender/main.go b/backend/cmd/ender/main.go index 74b0b8bd2..da7ca9b89 100644 --- a/backend/cmd/ender/main.go +++ b/backend/cmd/ender/main.go @@ -2,8 +2,6 @@ package main import ( "log" - "openreplay/backend/internal/storage" - "openreplay/backend/pkg/pprof" "os" "os/signal" "strings" @@ -12,16 +10,23 @@ import ( "openreplay/backend/internal/config/ender" "openreplay/backend/internal/sessionender" + "openreplay/backend/internal/storage" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + enderMetrics "openreplay/backend/pkg/metrics/ender" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("ender") + m := metrics.New() + m.Register(enderMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := ender.New() @@ -29,10 +34,10 @@ func main() { pprof.StartProfilingServer() } - pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs) + pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs) defer pg.Close() - sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) + sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) if err != nil { log.Printf("can't init ender service: %s", err) return diff --git a/backend/cmd/http/main.go b/backend/cmd/http/main.go index 4fb82b635..83eedaf29 100644 --- a/backend/cmd/http/main.go +++ b/backend/cmd/http/main.go @@ -2,23 +2,28 @@ package main import ( "log" - "openreplay/backend/internal/config/http" - "openreplay/backend/internal/http/router" - "openreplay/backend/internal/http/server" - "openreplay/backend/internal/http/services" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" + "openreplay/backend/internal/config/http" + "openreplay/backend/internal/http/router" + "openreplay/backend/internal/http/server" + "openreplay/backend/internal/http/services" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + httpMetrics "openreplay/backend/pkg/metrics/http" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("http") + m := metrics.New() + m.Register(httpMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := http.New() @@ -31,14 +36,14 @@ func main() { defer producer.Close(15000) // Connect to database - dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20) + dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20) defer dbConn.Close() // Build all services services := services.New(cfg, producer, dbConn) // Init server's routes - router, err := router.NewRouter(cfg, services, metrics) + router, err := router.NewRouter(cfg, services) if err != nil { log.Fatalf("failed while creating engine: %s", err) } diff --git a/backend/cmd/integrations/main.go b/backend/cmd/integrations/main.go index 8c6d56966..3fa07ee9c 100644 --- a/backend/cmd/integrations/main.go +++ 
b/backend/cmd/integrations/main.go @@ -2,24 +2,26 @@ package main import ( "log" - config "openreplay/backend/internal/config/integrations" - "openreplay/backend/internal/integrations/clientManager" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" - "time" - "os" "os/signal" "syscall" + "time" + config "openreplay/backend/internal/config/integrations" + "openreplay/backend/internal/integrations/clientManager" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/token" ) func main() { - metrics := monitoring.New("integrations") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -27,7 +29,7 @@ func main() { pprof.StartProfilingServer() } - pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics) + pg := postgres.NewConn(cfg.Postgres.String(), 0, 0) defer pg.Close() tokenizer := token.NewTokenizer(cfg.TokenSecret) diff --git a/backend/cmd/sink/main.go b/backend/cmd/sink/main.go index 74e0b1db1..4bbaeeee4 100644 --- a/backend/cmd/sink/main.go +++ b/backend/cmd/sink/main.go @@ -2,10 +2,8 @@ package main import ( "bytes" - "context" "encoding/binary" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -16,13 +14,16 @@ import ( "openreplay/backend/internal/sink/sessionwriter" "openreplay/backend/internal/storage" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + sinkMetrics "openreplay/backend/pkg/metrics/sink" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/url/assets" ) func main() { - metrics := monitoring.New("sink") + m := metrics.New() + m.Register(sinkMetrics.List()) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := sink.New() @@ -39,22 +40,8 @@ func main() { producer := queue.NewProducer(cfg.MessageSizeLimit, true) defer producer.Close(cfg.ProducerCloseTimeout) rewriter := assets.NewRewriter(cfg.AssetsOrigin) - assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics) - + assetMessageHandler := assetscache.New(cfg, rewriter, producer) counter := storage.NewLogCounter() - // Session message metrics - totalMessages, err := metrics.RegisterCounter("messages_total") - if err != nil { - log.Printf("can't create messages_total metric: %s", err) - } - savedMessages, err := metrics.RegisterCounter("messages_saved") - if err != nil { - log.Printf("can't create messages_saved metric: %s", err) - } - messageSize, err := metrics.RegisterHistogram("messages_size") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } var ( sessionID uint64 @@ -74,11 +61,12 @@ func main() { if domBuffer.Len() <= 0 && devBuffer.Len() <= 0 { return } + sinkMetrics.RecordWrittenBytes(float64(domBuffer.Len()), "dom") + sinkMetrics.RecordWrittenBytes(float64(devBuffer.Len()), "devtools") // Write buffered batches to the session if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil { log.Printf("writer error: %s", err) - return } // Prepare buffer for the next batch @@ -88,8 +76,7 @@ func main() { return } - // [METRICS] Increase the number of processed messages - totalMessages.Add(context.Background(), 1) + sinkMetrics.IncreaseTotalMessages() // Send SessionEnd trigger to storage service if msg.TypeID() == 
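// (note on the hunk above: the writer-error branch no longer returns early,
// so a failed session write is logged, the buffers are still reset, and
// processing falls through to the counting and the SessionEnd check here;
// presumably deliberate, trading one dropped batch for not wedging the
// partition)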
messages.MsgSessionEnd { @@ -187,9 +174,8 @@ func main() { } } - // [METRICS] Increase the number of written to the files messages and the message size - messageSize.Record(context.Background(), float64(len(msg.Encode()))) - savedMessages.Add(context.Background(), 1) + sinkMetrics.IncreaseWrittenMessages() + sinkMetrics.RecordMessageSize(float64(len(msg.Encode()))) } consumer := queue.NewConsumer( diff --git a/backend/cmd/storage/main.go b/backend/cmd/storage/main.go index dcb1b53ed..472324b95 100644 --- a/backend/cmd/storage/main.go +++ b/backend/cmd/storage/main.go @@ -2,7 +2,6 @@ package main import ( "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -12,13 +11,17 @@ import ( "openreplay/backend/internal/storage" "openreplay/backend/pkg/failover" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + storageMetrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" - s3storage "openreplay/backend/pkg/storage" + cloud "openreplay/backend/pkg/storage" ) func main() { - metrics := monitoring.New("storage") + m := metrics.New() + m.Register(storageMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,8 +29,8 @@ func main() { pprof.StartProfilingServer() } - s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket) - srv, err := storage.New(cfg, s3, metrics) + s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket) + srv, err := storage.New(cfg, s3) if err != nil { log.Printf("can't init storage service: %s", err) return diff --git a/backend/internal/assets/cacher/cacher.go b/backend/internal/assets/cacher/cacher.go index 8bbee092f..4b0353a9a 100644 --- a/backend/internal/assets/cacher/cacher.go +++ b/backend/internal/assets/cacher/cacher.go @@ -1,16 +1,13 @@ package cacher import ( - "context" "crypto/tls" "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "io/ioutil" - "log" "mime" "net/http" - "openreplay/backend/pkg/monitoring" + metrics "openreplay/backend/pkg/metrics/assets" "path/filepath" "strings" "time" @@ -25,30 +22,22 @@ import ( const MAX_CACHE_DEPTH = 5 type cacher struct { - timeoutMap *timeoutMap // Concurrency implemented - s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." - httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." - rewriter *assets.Rewriter // Read only - Errors chan error - sizeLimit int - downloadedAssets syncfloat64.Counter - requestHeaders map[string]string - workers *WorkerPool + timeoutMap *timeoutMap // Concurrency implemented + s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." + httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." 
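// (the downloadedAssets syncfloat64.Counter field that used to be injected
// into this struct is gone: instrumentation now goes through the
// package-level helpers in pkg/metrics/assets, so the cacher no longer
// carries a metrics handle at all)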
+ rewriter *assets.Rewriter // Read only + Errors chan error + sizeLimit int + requestHeaders map[string]string + workers *WorkerPool } func (c *cacher) CanCache() bool { return c.workers.CanAddTask() } -func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { +func NewCacher(cfg *config.Config) *cacher { rewriter := assets.NewRewriter(cfg.AssetsOrigin) - if metrics == nil { - log.Fatalf("metrics are empty") - } - downloadedAssets, err := metrics.RegisterCounter("assets_downloaded") - if err != nil { - log.Printf("can't create downloaded_assets metric: %s", err) - } c := &cacher{ timeoutMap: newTimeoutMap(), s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets), @@ -59,11 +48,10 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, }, - rewriter: rewriter, - Errors: make(chan error), - sizeLimit: cfg.AssetsSizeLimit, - downloadedAssets: downloadedAssets, - requestHeaders: cfg.AssetsRequestHeaders, + rewriter: rewriter, + Errors: make(chan error), + sizeLimit: cfg.AssetsSizeLimit, + requestHeaders: cfg.AssetsRequestHeaders, } c.workers = NewPool(64, c.CacheFile) return c @@ -75,6 +63,7 @@ func (c *cacher) CacheFile(task *Task) { func (c *cacher) cacheURL(t *Task) { t.retries-- + start := time.Now() req, _ := http.NewRequest("GET", t.requestURL, nil) if t.retries%2 == 0 { req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0") @@ -87,6 +76,7 @@ func (c *cacher) cacheURL(t *Task) { c.Errors <- errors.Wrap(err, t.urlContext) return } + metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode) defer res.Body.Close() if res.StatusCode >= 400 { printErr := true @@ -122,12 +112,15 @@ func (c *cacher) cacheURL(t *Task) { } // TODO: implement in streams + start = time.Now() err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false) if err != nil { + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true) c.Errors <- errors.Wrap(err, t.urlContext) return } - c.downloadedAssets.Add(context.Background(), 1) + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false) + metrics.IncreaseSavedSessions() if isCSS { if t.depth > 0 { diff --git a/backend/internal/http/router/handlers-ios.go b/backend/internal/http/router/handlers-ios.go index e0fc73b6f..b11918d54 100644 --- a/backend/internal/http/router/handlers-ios.go +++ b/backend/internal/http/router/handlers-ios.go @@ -22,28 +22,28 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) req := &StartIOSSessionRequest{} if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit) defer body.Close() if err := json.NewDecoder(body).Decode(req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0) return } if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, 
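// (the "+" replacements throughout this file follow one convention: three
// extra trailing arguments, request start time, route path and body size,
// feeding the new http metrics, with 0 apparently meaning "no body measured"
// on early-error paths. One nit visible later in response.go: the rewritten
// ResponseWithError does `body, err := json.Marshal(...)`, reusing its err
// parameter after the response struct has already been built from
// err.Error(); harmless, but easy to misread)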
errors.New("Project doesn't exist or is not active")) + ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0) } else { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging } return } @@ -53,18 +53,18 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) if err != nil { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0) return } sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli())) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) return } // TODO: if EXPIRED => send message for two sessions association @@ -94,22 +94,24 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) UserUUID: userUUID, SessionID: strconv.FormatUint(tokenData.ID, 10), BeaconSizeLimit: e.cfg.BeaconSizeLimit, - }) + }, startTime, r.URL.Path, 0) } func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS) } func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil && err != token.EXPIRED { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } // Check timestamps here? @@ -117,16 +119,17 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque } func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() log.Printf("recieved imagerequest") sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { // Should accept expired token? 
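// (aside on the pattern: every early return in these handlers now threads
// startTime / r.URL.Path / body size by hand; an alternative design would be
// a mux middleware wrapping http.ResponseWriter in a small
// statusRecorder{http.ResponseWriter; code int} to capture the status code
// and record the request metrics in one place. An option, not what this
// patch does)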
- ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit) @@ -134,21 +137,21 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) err = r.ParseMultipartForm(1e6) // ~1Mb if err == http.ErrNotMultipart || err == http.ErrMissingBoundary { - ResponseWithError(w, http.StatusUnsupportedMediaType, err) + ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0) return // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB } else if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging return } if r.MultipartForm == nil { - ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed")) + ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0) return } if len(r.MultipartForm.Value["projectKey"]) == 0 { - ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter? + ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter? return } diff --git a/backend/internal/http/router/handlers-web.go b/backend/internal/http/router/handlers-web.go index 7afd184e5..52a37b7f0 100644 --- a/backend/internal/http/router/handlers-web.go +++ b/backend/internal/http/router/handlers-web.go @@ -3,18 +3,17 @@ package router import ( "encoding/json" "errors" - "github.com/Masterminds/semver" - "go.opentelemetry.io/otel/attribute" "io" "log" "math/rand" "net/http" - "openreplay/backend/internal/http/uuid" - "openreplay/backend/pkg/flakeid" "strconv" "time" + "github.com/Masterminds/semver" + "openreplay/backend/internal/http/uuid" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/flakeid" . 
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/token" ) @@ -28,13 +27,6 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ( if err != nil { return nil, err } - - reqSize := len(bodyBytes) - e.requestSize.Record( - r.Context(), - float64(reqSize), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) return bodyBytes, nil } @@ -56,40 +48,43 @@ func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { startTime := time.Now() + bodySize := 0 // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &StartSessionRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached")) + ResponseWithError(w, http.StatusNotFound, + errors.New("project doesn't exist or capture limit has been reached"), startTime, r.URL.Path, bodySize) } else { log.Printf("can't get project by key: %s", err) - ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key")) + ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize) } return } @@ -99,19 +94,19 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) if err != nil || req.Reset { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } startTimeMili := startTime.UnixMilli() sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili)) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize) return } // TODO: if EXPIRED => send message for two sessions association @@ -163,29 +158,33 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) BeaconSizeLimit: e.getBeaconSize(tokenData.ID), StartTimestamp: 
int64(flakeid.ExtractTimestamp(tokenData.ID)), Delay: tokenData.Delay, - }) + }, startTime, r.URL.Path, bodySize) } func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check authorization sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize) return } // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID)) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Send processed messages to queue as array of bytes // TODO: check bytes for nonsense crap @@ -194,39 +193,43 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) log.Printf("can't send processed messages to queue: %s", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &NotStartedRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r) @@ -248,5 +251,5 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { log.Printf("Unable to insert Unstarted Session: %v\n", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } diff --git a/backend/internal/http/router/handlers.go b/backend/internal/http/router/handlers.go index c36fdd668..425177341 100644 --- a/backend/internal/http/router/handlers.go +++ b/backend/internal/http/router/handlers.go @@ -6,9 +6,11 @@ import ( "io/ioutil" "log" "net/http" + "time" ) func (e *Router) pushMessages(w 
http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) { + start := time.Now() body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit) defer body.Close() @@ -21,7 +23,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID reader, err = gzip.NewReader(body) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response return } //log.Println("Gzip reader init", reader) @@ -32,7 +34,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID //log.Println("Reader after switch:", reader) buf, err := ioutil.ReadAll(reader) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging return } e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send? diff --git a/backend/internal/http/router/response.go b/backend/internal/http/router/response.go index 0b4725419..b66b7c563 100644 --- a/backend/internal/http/router/response.go +++ b/backend/internal/http/router/response.go @@ -4,21 +4,44 @@ import ( "encoding/json" "log" "net/http" + "time" + + metrics "openreplay/backend/pkg/metrics/http" ) -func ResponseWithJSON(w http.ResponseWriter, res interface{}) { +func recordMetrics(requestStart time.Time, url string, code, bodySize int) { + if bodySize > 0 { + metrics.RecordRequestSize(float64(bodySize), url, code) + } + metrics.IncreaseTotalRequests() + metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code) +} + +func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) { + w.WriteHeader(http.StatusOK) + recordMetrics(requestStart, url, http.StatusOK, bodySize) +} + +func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) { body, err := json.Marshal(res) if err != nil { log.Println(err) } w.Header().Set("Content-Type", "application/json") w.Write(body) + recordMetrics(requestStart, url, http.StatusOK, bodySize) } -func ResponseWithError(w http.ResponseWriter, code int, err error) { - type response struct { - Error string `json:"error"` +type response struct { + Error string `json:"error"` +} + +func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) { + body, err := json.Marshal(&response{err.Error()}) + if err != nil { + log.Println(err) } w.WriteHeader(code) - ResponseWithJSON(w, &response{err.Error()}) + w.Write(body) + recordMetrics(requestStart, url, code, bodySize) } diff --git a/backend/internal/http/router/router.go b/backend/internal/http/router/router.go index 964016dfd..6cd7efe79 100644 --- a/backend/internal/http/router/router.go +++ b/backend/internal/http/router/router.go @@ -1,19 +1,16 @@ package router import ( - "context" "fmt" - "github.com/gorilla/mux" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" "net/http" + "sync" + "time" + + "github.com/gorilla/mux" http3 "openreplay/backend/internal/config/http" http2 "openreplay/backend/internal/http/services" "openreplay/backend/internal/http/util" - "openreplay/backend/pkg/monitoring" - "sync" - "time" ) type BeaconSize struct { @@ -25,21 +22,16 @@ type Router 
struct { router *mux.Router cfg *http3.Config services *http2.ServicesBuilder - requestSize syncfloat64.Histogram - requestDuration syncfloat64.Histogram - totalRequests syncfloat64.Counter mutex *sync.RWMutex beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize } -func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) { +func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case services == nil: return nil, fmt.Errorf("services is empty") - case metrics == nil: - return nil, fmt.Errorf("metrics is empty") } e := &Router{ cfg: cfg, @@ -47,7 +39,6 @@ func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *moni mutex: &sync.RWMutex{}, beaconSizeCache: make(map[uint64]*BeaconSize), } - e.initMetrics(metrics) e.init() go e.clearBeaconSizes() return e, nil @@ -115,22 +106,6 @@ func (e *Router) init() { e.router.Use(e.corsMiddleware) } -func (e *Router) initMetrics(metrics *monitoring.Metrics) { - var err error - e.requestSize, err = metrics.RegisterHistogram("requests_body_size") - if err != nil { - log.Printf("can't create requests_body_size metric: %s", err) - } - e.requestDuration, err = metrics.RegisterHistogram("requests_duration") - if err != nil { - log.Printf("can't create requests_duration metric: %s", err) - } - e.totalRequests, err = metrics.RegisterCounter("requests_total") - if err != nil { - log.Printf("can't create requests_total metric: %s", err) - } -} - func (e *Router) root(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } @@ -149,17 +124,8 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler { log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path)) - requestStart := time.Now() - // Serve request next.ServeHTTP(w, r) - - metricsContext, _ := context.WithTimeout(context.Background(), time.Millisecond*100) - e.totalRequests.Add(metricsContext, 1) - e.requestDuration.Record(metricsContext, - float64(time.Now().Sub(requestStart).Milliseconds()), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) }) } diff --git a/backend/internal/sessionender/ender.go b/backend/internal/sessionender/ender.go index c1c2c9b7f..e1ddb0ffe 100644 --- a/backend/internal/sessionender/ender.go +++ b/backend/internal/sessionender/ender.go @@ -1,13 +1,11 @@ package sessionender import ( - "context" - "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" "time" + + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/metrics/ender" ) // EndedSessionHandler handler for ended sessions @@ -23,32 +21,16 @@ type session struct { // SessionEnder updates timestamp of last message for each session type SessionEnder struct { - timeout int64 - sessions map[uint64]*session // map[sessionID]session - timeCtrl *timeController - activeSessions syncfloat64.UpDownCounter - totalSessions syncfloat64.Counter + timeout int64 + sessions map[uint64]*session // map[sessionID]session + timeCtrl *timeController } -func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) { - if metrics == nil { - return nil, fmt.Errorf("metrics module is empty") - } - activeSessions, err := metrics.RegisterUpDownCounter("sessions_active") - if err != nil { - return nil, fmt.Errorf("can't register session.active metric: %s", err) - } - totalSessions, err := 
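// (these "-" lines are the last uses of the injected monitoring API in this
// file; note that the replacement New(timeout, parts) keeps the
// (*SessionEnder, error) signature even though its only return is now
// `..., nil`, presumably to leave the callers' error checks untouched)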
metrics.RegisterCounter("sessions_total") - if err != nil { - return nil, fmt.Errorf("can't register session.total metric: %s", err) - } - +func New(timeout int64, parts int) (*SessionEnder, error) { return &SessionEnder{ - timeout: timeout, - sessions: make(map[uint64]*session), - timeCtrl: NewTimeController(parts), - activeSessions: activeSessions, - totalSessions: totalSessions, + timeout: timeout, + sessions: make(map[uint64]*session), + timeCtrl: NewTimeController(parts), }, nil } @@ -74,8 +56,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) { lastUserTime: msgTimestamp, // last timestamp from user's machine isEnded: false, } - se.activeSessions.Add(context.Background(), 1) - se.totalSessions.Add(context.Background(), 1) + ender.IncreaseActiveSessions() + ender.IncreaseTotalSessions() return } // Keep the highest user's timestamp for correct session duration value @@ -100,7 +82,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) { sess.isEnded = true if handler(sessID, sess.lastUserTime) { delete(se.sessions, sessID) - se.activeSessions.Add(context.Background(), -1) + ender.DecreaseActiveSessions() + ender.IncreaseClosedSessions() removedSessions++ } else { log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime) diff --git a/backend/internal/sink/assetscache/assets.go b/backend/internal/sink/assetscache/assets.go index 4c63f6897..387ee5c92 100644 --- a/backend/internal/sink/assetscache/assets.go +++ b/backend/internal/sink/assetscache/assets.go @@ -1,20 +1,19 @@ package assetscache import ( - "context" "crypto/md5" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "log" "net/url" - "openreplay/backend/internal/config/sink" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/queue/types" - "openreplay/backend/pkg/url/assets" + metrics "openreplay/backend/pkg/metrics/sink" "strings" "sync" "time" + + "openreplay/backend/internal/config/sink" + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/queue/types" + "openreplay/backend/pkg/url/assets" ) type CachedAsset struct { @@ -23,52 +22,21 @@ type CachedAsset struct { } type AssetsCache struct { - mutex sync.RWMutex - cfg *sink.Config - rewriter *assets.Rewriter - producer types.Producer - cache map[string]*CachedAsset - blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain - totalAssets syncfloat64.Counter - cachedAssets syncfloat64.Counter - skippedAssets syncfloat64.Counter - assetSize syncfloat64.Histogram - assetDuration syncfloat64.Histogram + mutex sync.RWMutex + cfg *sink.Config + rewriter *assets.Rewriter + producer types.Producer + cache map[string]*CachedAsset + blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain } -func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache { - // Assets metrics - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } - cachedAssets, err := metrics.RegisterCounter("assets_cached") - if err != nil { - log.Printf("can't create assets_cached metric: %s", err) - } - skippedAssets, err := metrics.RegisterCounter("assets_skipped") - if err != nil { - log.Printf("can't create assets_skipped metric: %s", err) - } - assetSize, err := metrics.RegisterHistogram("asset_size") - if err != nil { - log.Printf("can't create 
asset_size metric: %s", err) - } - assetDuration, err := metrics.RegisterHistogram("asset_duration") - if err != nil { - log.Printf("can't create asset_duration metric: %s", err) - } +func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache { assetsCache := &AssetsCache{ - cfg: cfg, - rewriter: rewriter, - producer: producer, - cache: make(map[string]*CachedAsset, 64), - blackList: make([]string, 0), - totalAssets: totalAssets, - cachedAssets: cachedAssets, - skippedAssets: skippedAssets, - assetSize: assetSize, - assetDuration: assetDuration, + cfg: cfg, + rewriter: rewriter, + producer: producer, + cache: make(map[string]*CachedAsset, 64), + blackList: make([]string, 0), } // Parse black list for cache layer if len(cfg.CacheBlackList) > 0 { @@ -84,7 +52,7 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, m } func (e *AssetsCache) cleaner() { - cleanTick := time.Tick(time.Minute * 30) + cleanTick := time.Tick(time.Minute * 3) for { select { case <-cleanTick: @@ -105,6 +73,7 @@ func (e *AssetsCache) clearCache() { if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration { deleted++ delete(e.cache, id) + metrics.DecreaseCachedAssets() } } log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize) @@ -232,8 +201,7 @@ func parseHost(baseURL string) (string, error) { } func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string { - ctx := context.Background() - e.totalAssets.Add(ctx, 1) + metrics.IncreaseTotalAssets() // Try to find asset in cache h := md5.New() // Cut first part of url (scheme + host) @@ -255,7 +223,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st e.mutex.RUnlock() if ok { if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration { - e.skippedAssets.Add(ctx, 1) + metrics.IncreaseSkippedAssets() return cachedAsset.msg } } @@ -267,8 +235,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st start := time.Now() res := e.getRewrittenCSS(sessionID, baseURL, css) duration := time.Now().Sub(start).Milliseconds() - e.assetSize.Record(ctx, float64(len(res))) - e.assetDuration.Record(ctx, float64(duration)) + metrics.RecordAssetSize(float64(len(res))) + metrics.RecordProcessAssetDuration(float64(duration)) // Save asset to cache if we spent more than threshold if duration > e.cfg.CacheThreshold { e.mutex.Lock() @@ -277,7 +245,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st ts: time.Now(), } e.mutex.Unlock() - e.cachedAssets.Add(ctx, 1) + metrics.IncreaseCachedAssets() } // Return rewritten asset return res diff --git a/backend/internal/storage/storage.go b/backend/internal/storage/storage.go index fbe9e2228..1e2507163 100644 --- a/backend/internal/storage/storage.go +++ b/backend/internal/storage/storage.go @@ -2,20 +2,20 @@ package storage import ( "bytes" - "context" "fmt" - gzip "github.com/klauspost/pgzip" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - config "openreplay/backend/internal/config/storage" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/storage" "os" "strconv" "strings" "sync" "time" + + config "openreplay/backend/internal/config/storage" + "openreplay/backend/pkg/messages" + metrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/storage" + + gzip "github.com/klauspost/pgzip" ) type FileType string @@ -25,6 +25,13 @@ const ( DEV FileType = 
"/devtools.mob" ) +func (t FileType) String() string { + if t == DOM { + return "dom" + } + return "devtools" +} + type Task struct { id string doms *bytes.Buffer @@ -36,92 +43,23 @@ type Storage struct { cfg *config.Config s3 *storage.S3 startBytes []byte - - totalSessions syncfloat64.Counter - sessionDOMSize syncfloat64.Histogram - sessionDEVSize syncfloat64.Histogram - readingDOMTime syncfloat64.Histogram - readingDEVTime syncfloat64.Histogram - sortingDOMTime syncfloat64.Histogram - sortingDEVTime syncfloat64.Histogram - archivingDOMTime syncfloat64.Histogram - archivingDEVTime syncfloat64.Histogram - uploadingDOMTime syncfloat64.Histogram - uploadingDEVTime syncfloat64.Histogram - - tasks chan *Task - ready chan struct{} + tasks chan *Task + ready chan struct{} } -func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) { +func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case s3 == nil: return nil, fmt.Errorf("s3 storage is empty") } - // Create metrics - totalSessions, err := metrics.RegisterCounter("sessions_total") - if err != nil { - log.Printf("can't create sessions_total metric: %s", err) - } - sessionDOMSize, err := metrics.RegisterHistogram("sessions_size") - if err != nil { - log.Printf("can't create session_size metric: %s", err) - } - sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size") - if err != nil { - log.Printf("can't create sessions_dt_size metric: %s", err) - } - readingDOMTime, err := metrics.RegisterHistogram("reading_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDOMTime, err := metrics.RegisterHistogram("sorting_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDEVTime, err := metrics.RegisterHistogram("sorting_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } - uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } newStorage := &Storage{ - cfg: cfg, - s3: s3, - startBytes: make([]byte, cfg.FileSplitSize), - totalSessions: totalSessions, - sessionDOMSize: sessionDOMSize, - sessionDEVSize: sessionDevtoolsSize, - readingDOMTime: readingDOMTime, - readingDEVTime: readingDEVTime, - sortingDOMTime: sortingDOMTime, - sortingDEVTime: sortingDEVTime, - archivingDOMTime: archivingDOMTime, - archivingDEVTime: archivingDEVTime, - uploadingDOMTime: uploadingDOMTime, - uploadingDEVTime: uploadingDEVTime, - tasks: make(chan *Task, 1), - ready: make(chan struct{}), + cfg: cfg, + s3: s3, + startBytes: make([]byte, cfg.FileSplitSize), + tasks: make(chan *Task, 1), + ready: make(chan struct{}), } go newStorage.worker() return newStorage, nil @@ 
-187,11 +125,7 @@ func (s *Storage) openSession(filePath string, tp FileType) ([]byte, error) { if err != nil { return nil, fmt.Errorf("can't sort session, err: %s", err) } - if tp == DOM { - s.sortingDOMTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } else { - s.sortingDEVTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } + metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return res, nil } @@ -215,26 +149,19 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { if err != nil { return err } - durRead := time.Now().Sub(startRead).Milliseconds() - // Send metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - if tp == DOM { - s.sessionDOMSize.Record(ctx, float64(len(mob))) - s.readingDOMTime.Record(ctx, float64(durRead)) - } else { - s.sessionDEVSize.Record(ctx, float64(len(mob))) - s.readingDEVTime.Record(ctx, float64(durRead)) - } + metrics.RecordSessionSize(float64(len(mob)), tp.String()) + metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String()) + // Encode and compress session if tp == DEV { - startCompress := time.Now() + start := time.Now() task.dev = s.compressSession(mob) - s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) } else { if len(mob) <= s.cfg.FileSplitSize { - startCompress := time.Now() + start := time.Now() task.doms = s.compressSession(mob) - s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return nil } wg := &sync.WaitGroup{} @@ -253,7 +180,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { wg.Done() }() wg.Wait() - s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart)) + metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String()) } return nil } @@ -324,11 +251,9 @@ func (s *Storage) uploadSession(task *Task) { wg.Done() }() wg.Wait() - // Record metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome)) - s.uploadingDEVTime.Record(ctx, float64(uploadDev)) - s.totalSessions.Add(ctx, 1) + metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String()) + metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String()) + metrics.IncreaseStorageTotalSessions() } func (s *Storage) worker() { diff --git a/backend/pkg/db/postgres/batches.go b/backend/pkg/db/postgres/batches.go index c1283da10..abdee36f2 100644 --- a/backend/pkg/db/postgres/batches.go +++ b/backend/pkg/db/postgres/batches.go @@ -1,14 +1,13 @@ package postgres import ( - "context" - "github.com/jackc/pgx/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/monitoring" "strings" "time" + + "openreplay/backend/pkg/metrics/database" + + "github.com/jackc/pgx/v4" ) type batchItem struct { @@ -78,21 +77,17 @@ func NewBatchesTask(size int) *batchesTask { } type BatchSet struct { - c Pool - batches map[uint64]*SessionBatch - batchQueueLimit int - batchSizeLimit int - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - 
sqlRequestCounter syncfloat64.Counter - updates map[uint64]*sessionUpdates - workerTask chan *batchesTask - done chan struct{} - finished chan struct{} + c Pool + batches map[uint64]*SessionBatch + batchQueueLimit int + batchSizeLimit int + updates map[uint64]*sessionUpdates + workerTask chan *batchesTask + done chan struct{} + finished chan struct{} } -func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *BatchSet { +func NewBatchSet(c Pool, queueLimit, sizeLimit int) *BatchSet { bs := &BatchSet{ c: c, batches: make(map[uint64]*SessionBatch), @@ -103,31 +98,10 @@ func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) finished: make(chan struct{}), updates: make(map[uint64]*sessionUpdates), } - bs.initMetrics(metrics) go bs.worker() return bs } -func (conn *BatchSet) initMetrics(metrics *monitoring.Metrics) { - var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *BatchSet) getBatch(sessionID uint64) *SessionBatch { sessionID = sessionID % 10 if _, ok := conn.batches[sessionID]; !ok { @@ -194,11 +168,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { // Append session update sql request to the end of batch batch.Prepare() // Record batch size in bytes and number of lines - conn.batchSizeBytes.Record(context.Background(), float64(batch.Size())) - conn.batchSizeLines.Record(context.Background(), float64(batch.Len())) + database.RecordBatchSize(float64(batch.Size())) + database.RecordBatchElements(float64(batch.Len())) start := time.Now() - isFailed := false // Send batch to db and execute br := conn.c.SendBatch(batch.batch) @@ -209,15 +182,11 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { failedSql := batch.items[i] query := strings.ReplaceAll(failedSql.query, "\n", " ") log.Println("failed sql req:", query, failedSql.arguments) - isFailed = true } } br.Close() // returns err - dur := time.Now().Sub(start).Milliseconds() - conn.sqlRequestTime.Record(context.Background(), float64(dur), - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) - conn.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) + database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) + database.IncreaseTotalBatches() } } diff --git a/backend/pkg/db/postgres/bulk.go b/backend/pkg/db/postgres/bulk.go index 8c6c42f78..b6a2ddd35 100644 --- a/backend/pkg/db/postgres/bulk.go +++ b/backend/pkg/db/postgres/bulk.go @@ -2,13 +2,9 @@ package postgres import ( "bytes" - "context" "errors" "fmt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "log" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics/database" "time" ) @@ -25,15 +21,13 @@ type Bulk interface { } type bulkImpl struct { - conn Pool - table string - columns string - template string - setSize int - sizeLimit int - values 
[]interface{} - bulkSize syncfloat64.Histogram - bulkDuration syncfloat64.Histogram + conn Pool + table string + columns string + template string + setSize int + sizeLimit int + values []interface{} } func (b *bulkImpl) Append(args ...interface{}) error { @@ -79,18 +73,15 @@ func (b *bulkImpl) send() error { return fmt.Errorf("send bulk err: %s", err) } // Save bulk metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table)) - b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table)) + database.RecordBulkElements(float64(size), "pg", b.table) + database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table) return nil } -func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { +func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { switch { case conn == nil: return nil, errors.New("db conn is empty") - case metrics == nil: - return nil, errors.New("metrics is empty") case table == "": return nil, errors.New("table is empty") case columns == "": @@ -102,23 +93,13 @@ func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template st case sizeLimit <= 0: return nil, errors.New("size limit is wrong") } - messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } - bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } return &bulkImpl{ - conn: conn, - table: table, - columns: columns, - template: template, - setSize: setSize, - sizeLimit: sizeLimit, - values: make([]interface{}, 0, setSize*sizeLimit), - bulkSize: messagesInBulk, - bulkDuration: bulkInsertDuration, + conn: conn, + table: table, + columns: columns, + template: template, + setSize: setSize, + sizeLimit: sizeLimit, + values: make([]interface{}, 0, setSize*sizeLimit), }, nil } diff --git a/backend/pkg/db/postgres/bulks.go b/backend/pkg/db/postgres/bulks.go index 5774ba184..f3e9e95c9 100644 --- a/backend/pkg/db/postgres/bulks.go +++ b/backend/pkg/db/postgres/bulks.go @@ -2,7 +2,6 @@ package postgres import ( "log" - "openreplay/backend/pkg/monitoring" "time" ) @@ -30,16 +29,14 @@ type BulkSet struct { webCustomEvents Bulk webClickEvents Bulk webNetworkRequest Bulk - metrics *monitoring.Metrics workerTask chan *bulksTask done chan struct{} finished chan struct{} } -func NewBulkSet(c Pool, metrics *monitoring.Metrics) *BulkSet { +func NewBulkSet(c Pool) *BulkSet { bs := &BulkSet{ c: c, - metrics: metrics, workerTask: make(chan *bulksTask, 1), done: make(chan struct{}), finished: make(chan struct{}), @@ -86,7 +83,7 @@ func (conn *BulkSet) Get(name string) Bulk { func (conn *BulkSet) initBulks() { var err error - conn.autocompletes, err = NewBulk(conn.c, conn.metrics, + conn.autocompletes, err = NewBulk(conn.c, "autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", @@ -94,7 +91,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create autocomplete bulk: %s", err) } - conn.requests, err = NewBulk(conn.c, conn.metrics, + conn.requests, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, duration, success)", "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)", @@ -102,7 +99,7 @@ func 
(conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create requests bulk: %s", err) } - conn.customEvents, err = NewBulk(conn.c, conn.metrics, + conn.customEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, timestamp, seq_index, name, payload)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d)", @@ -110,7 +107,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create customEvents bulk: %s", err) } - conn.webPageEvents, err = NewBulk(conn.c, conn.metrics, + conn.webPageEvents, err = NewBulk(conn.c, "events.pages", "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+ "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+ @@ -122,7 +119,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webInputEvents, err = NewBulk(conn.c, conn.metrics, + conn.webInputEvents, err = NewBulk(conn.c, "events.inputs", "(session_id, message_id, timestamp, value, label)", "($%d, $%d, $%d, LEFT($%d, 2000), NULLIF(LEFT($%d, 2000),''))", @@ -130,7 +127,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webGraphQL, err = NewBulk(conn.c, conn.metrics, + conn.webGraphQL, err = NewBulk(conn.c, "events.graphql", "(session_id, timestamp, message_id, name, request_body, response_body)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -138,7 +135,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webErrors, err = NewBulk(conn.c, conn.metrics, + conn.webErrors, err = NewBulk(conn.c, "errors", "(error_id, project_id, source, name, message, payload)", "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)", @@ -146,7 +143,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrors bulk: %s", err) } - conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics, + conn.webErrorEvents, err = NewBulk(conn.c, "events.errors", "(session_id, message_id, timestamp, error_id)", "($%d, $%d, $%d, $%d)", @@ -154,7 +151,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorEvents bulk: %s", err) } - conn.webErrorTags, err = NewBulk(conn.c, conn.metrics, + conn.webErrorTags, err = NewBulk(conn.c, "public.errors_tags", "(session_id, message_id, error_id, key, value)", "($%d, $%d, $%d, $%d, $%d)", @@ -162,7 +159,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorEvents bulk: %s", err) } - conn.webIssues, err = NewBulk(conn.c, conn.metrics, + conn.webIssues, err = NewBulk(conn.c, "issues", "(project_id, issue_id, type, context_string)", "($%d, $%d, $%d, $%d)", @@ -170,7 +167,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssues bulk: %s", err) } - conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics, + conn.webIssueEvents, err = NewBulk(conn.c, "events_common.issues", "(session_id, issue_id, timestamp, seq_index, payload)", "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))", @@ -178,7 +175,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssueEvents bulk: %s", err) } - conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics, + conn.webCustomEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, seq_index, timestamp, name, payload, level)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -186,7 +183,7 @@ func (conn *BulkSet) 
initBulks() { if err != nil { log.Fatalf("can't create webCustomEvents bulk: %s", err) } - conn.webClickEvents, err = NewBulk(conn.c, conn.metrics, + conn.webClickEvents, err = NewBulk(conn.c, "events.clicks", "(session_id, message_id, timestamp, label, selector, url, path)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000))", @@ -194,7 +191,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webClickEvents bulk: %s", err) } - conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics, + conn.webNetworkRequest, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)", "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)", diff --git a/backend/pkg/db/postgres/connector.go b/backend/pkg/db/postgres/connector.go index 2e8f3d425..6904dc135 100644 --- a/backend/pkg/db/postgres/connector.go +++ b/backend/pkg/db/postgres/connector.go @@ -2,11 +2,10 @@ package postgres import ( "context" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" + + "github.com/jackc/pgx/v4/pgxpool" "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/monitoring" ) type CH interface { @@ -15,36 +14,28 @@ type CH interface { // Conn contains batches, bulks and cache for all sessions type Conn struct { - c Pool - batches *BatchSet - bulks *BulkSet - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter - chConn CH + c Pool + batches *BatchSet + bulks *BulkSet + chConn CH } func (conn *Conn) SetClickHouse(ch CH) { conn.chConn = ch } -func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn { - if metrics == nil { - log.Fatalf("metrics is nil") - } +func NewConn(url string, queueLimit, sizeLimit int) *Conn { c, err := pgxpool.Connect(context.Background(), url) if err != nil { log.Fatalf("pgxpool.Connect err: %s", err) } conn := &Conn{} - conn.initMetrics(metrics) - conn.c, err = NewPool(c, conn.sqlRequestTime, conn.sqlRequestCounter) + conn.c, err = NewPool(c) if err != nil { log.Fatalf("can't create new pool wrapper: %s", err) } - conn.bulks = NewBulkSet(conn.c, metrics) - conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit, metrics) + conn.bulks = NewBulkSet(conn.c) + conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit) return conn } @@ -55,26 +46,6 @@ func (conn *Conn) Close() error { return nil } -func (conn *Conn) initMetrics(metrics *monitoring.Metrics) { - var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) { if len(value) == 0 { return diff --git 
a/backend/pkg/db/postgres/pool.go b/backend/pkg/db/postgres/pool.go index 5f9cbaa29..5214be8d0 100644 --- a/backend/pkg/db/postgres/pool.go +++ b/backend/pkg/db/postgres/pool.go @@ -3,12 +3,12 @@ package postgres import ( "context" "errors" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "strings" "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "openreplay/backend/pkg/metrics/database" ) // Pool is a pgx.Pool wrapper with metrics integration @@ -22,19 +22,15 @@ type Pool interface { } type poolImpl struct { - conn *pgxpool.Pool - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter + conn *pgxpool.Pool } func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) { start := time.Now() res, err := p.conn.Query(getTimeoutContext(), sql, args...) method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res, err } @@ -42,10 +38,8 @@ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row { start := time.Now() res := p.conn.QueryRow(getTimeoutContext(), sql, args...) method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res } @@ -53,45 +47,37 @@ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error { start := time.Now() _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...) 
method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults { start := time.Now() res := p.conn.SendBatch(getTimeoutContext(), b) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "sendBatch")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "sendBatch")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "") + database.IncreaseTotalRequests("sendBatch", "") return res } func (p *poolImpl) Begin() (*_Tx, error) { start := time.Now() tx, err := p.conn.Begin(context.Background()) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "begin")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "begin")) - return &_Tx{tx, p.sqlRequestTime, p.sqlRequestCounter}, err + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "") + database.IncreaseTotalRequests("begin", "") + return &_Tx{tx}, err } func (p *poolImpl) Close() { p.conn.Close() } -func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlRequestCounter syncfloat64.Counter) (Pool, error) { +func NewPool(conn *pgxpool.Pool) (Pool, error) { if conn == nil { return nil, errors.New("conn is empty") } return &poolImpl{ - conn: conn, - sqlRequestTime: sqlRequestTime, - sqlRequestCounter: sqlRequestCounter, + conn: conn, }, nil } @@ -99,38 +85,30 @@ func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlReques type _Tx struct { pgx.Tx - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter } func (tx *_Tx) exec(sql string, args ...interface{}) error { start := time.Now() _, err := tx.Exec(context.Background(), sql, args...) 
method, table := methodName(sql) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (tx *_Tx) rollback() error { start := time.Now() err := tx.Rollback(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "rollback")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "rollback")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "") + database.IncreaseTotalRequests("rollback", "") return err } func (tx *_Tx) commit() error { start := time.Now() err := tx.Commit(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "commit")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "commit")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "") + database.IncreaseTotalRequests("commit", "") return err } @@ -169,7 +147,8 @@ func methodName(sql string) (string, string) { case "update": table = strings.TrimSpace(parts[1]) case "insert": - table = strings.TrimSpace(parts[2]) + tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(") + table = tableNameParts[0] } return cmd, table } diff --git a/backend/pkg/messages/iterator-sink.go b/backend/pkg/messages/iterator-sink.go index a5897c3b7..be12b63eb 100644 --- a/backend/pkg/messages/iterator-sink.go +++ b/backend/pkg/messages/iterator-sink.go @@ -3,6 +3,7 @@ package messages import ( "fmt" "log" + "openreplay/backend/pkg/metrics/sink" ) type sinkMessageIteratorImpl struct { @@ -53,6 +54,8 @@ func (i *sinkMessageIteratorImpl) sendBatchEnd() { } func (i *sinkMessageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { + sink.RecordBatchSize(float64(len(batchData))) + sink.IncreaseTotalBatches() // Create new message reader reader := NewMessageReader(batchData) diff --git a/backend/pkg/messages/iterator.go b/backend/pkg/messages/iterator.go index a6717257e..f7b014d30 100644 --- a/backend/pkg/messages/iterator.go +++ b/backend/pkg/messages/iterator.go @@ -74,12 +74,13 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { i.messageInfo.Index++ msg := reader.Message() + msgType := msg.TypeID() // Preprocess "system" messages if _, ok := i.preFilter[msg.TypeID()]; ok { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } msg = transformDeprecated(msg) @@ -99,7 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { if i.autoDecode { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } } diff --git a/backend/pkg/metrics/assets/metrics.go b/backend/pkg/metrics/assets/metrics.go new file mode 100644 index 000000000..44af0dfa9 --- /dev/null +++ 
b/backend/pkg/metrics/assets/metrics.go
@@ -0,0 +1,72 @@
+package assets
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+	"strconv"
+)
+
+var assetsProcessedSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "assets",
+		Name:      "processed_total",
+		Help:      "A counter displaying the total count of processed assets.",
+	},
+)
+
+func IncreaseProcessesSessions() {
+	assetsProcessedSessions.Inc()
+}
+
+var assetsSavedSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "assets",
+		Name:      "saved_total",
+		Help:      "A counter displaying the total number of cached assets.",
+	},
+)
+
+func IncreaseSavedSessions() {
+	assetsSavedSessions.Inc()
+}
+
+var assetsDownloadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "assets",
+		Name:      "download_duration_seconds",
+		Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"response_code"},
+)
+
+func RecordDownloadDuration(durMillis float64, code int) {
+	assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var assetsUploadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "assets",
+		Name:      "upload_s3_duration_seconds",
+		Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"failed"},
+)
+
+func RecordUploadDuration(durMillis float64, isFailed bool) {
+	failed := "false"
+	if isFailed {
+		failed = "true"
+	}
+	assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		assetsProcessedSessions,
+		assetsSavedSessions,
+		assetsDownloadDuration,
+		assetsUploadDuration,
+	}
+}
diff --git a/backend/pkg/metrics/common/metrics.go b/backend/pkg/metrics/common/metrics.go
new file mode 100644
index 000000000..85b66c713
--- /dev/null
+++ b/backend/pkg/metrics/common/metrics.go
@@ -0,0 +1,11 @@
+package common
+
+// DefaultDurationBuckets is a set of buckets from 5 milliseconds to 1000 seconds (16.6667 minutes)
+var DefaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000}
+
+// DefaultSizeBuckets is a set of buckets from 1 byte to 1_000_000_000 bytes (~1 GB)
+var DefaultSizeBuckets = []float64{1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100_000, 250_000,
+	500_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000}
+
+// DefaultBuckets is a set of buckets from 1 to 1_000_000 elements
+var DefaultBuckets = []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10_000, 50_000, 100_000, 1_000_000}
diff --git a/backend/pkg/metrics/database/metrics.go b/backend/pkg/metrics/database/metrics.go
new file mode 100644
index 000000000..a9f3990cd
--- /dev/null
+++ b/backend/pkg/metrics/database/metrics.go
@@ -0,0 +1,127 @@
+package database
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+)
+
+var dbBatchSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "batch_size_bytes",
+		Help:      "A histogram displaying the batch size in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordBatchSize(size float64) {
+	dbBatchSize.Observe(size)
+}
+
+var dbBatchElements = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: 
"db", + Name: "batch_size_elements", + Help: "A histogram displaying the number of SQL commands in each batch.", + Buckets: common.DefaultBuckets, + }, +) + +func RecordBatchElements(number float64) { + dbBatchElements.Observe(number) +} + +var dbBatchInsertDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_insert_duration_seconds", + Help: "A histogram displaying the duration of batch inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordBatchInsertDuration(durMillis float64) { + dbBatchInsertDuration.Observe(durMillis / 1000.0) +} + +var dbBulkSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_bytes", + Help: "A histogram displaying the bulk size in bytes.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkSize(size float64, db, table string) { + dbBulkSize.WithLabelValues(db, table).Observe(size) +} + +var dbBulkElements = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_elements", + Help: "A histogram displaying the size of data set in each bulk.", + Buckets: common.DefaultBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkElements(size float64, db, table string) { + dbBulkElements.WithLabelValues(db, table).Observe(size) +} + +var dbBulkInsertDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_insert_duration_seconds", + Help: "A histogram displaying the duration of bulk inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkInsertDuration(durMillis float64, db, table string) { + dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0) +} + +var dbRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "request_duration_seconds", + Help: "A histogram displaying the duration of each sql request in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"method", "table"}, +) + +func RecordRequestDuration(durMillis float64, method, table string) { + dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0) +} + +var dbTotalRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "db", + Name: "requests_total", + Help: "A counter showing the total number of all SQL requests.", + }, + []string{"method", "table"}, +) + +func IncreaseTotalRequests(method, table string) { + dbTotalRequests.WithLabelValues(method, table).Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + dbBatchSize, + dbBatchElements, + dbBatchInsertDuration, + dbBulkSize, + dbBulkElements, + dbBulkInsertDuration, + dbRequestDuration, + dbTotalRequests, + } +} diff --git a/backend/pkg/metrics/ender/metrics.go b/backend/pkg/metrics/ender/metrics.go new file mode 100644 index 000000000..5e3308554 --- /dev/null +++ b/backend/pkg/metrics/ender/metrics.go @@ -0,0 +1,51 @@ +package ender + +import "github.com/prometheus/client_golang/prometheus" + +var enderActiveSessions = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "ender", + Name: "sessions_active", + Help: "A gauge displaying the number of active (live) sessions.", + }, +) + +func IncreaseActiveSessions() { + enderActiveSessions.Inc() +} + +func DecreaseActiveSessions() { + enderActiveSessions.Dec() +} + +var enderClosedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + 
Namespace: "ender",
+		Name:      "sessions_closed",
+		Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
+	},
+)
+
+func IncreaseClosedSessions() {
+	enderClosedSessions.Inc()
+}
+
+var enderTotalSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "ender",
+		Name:      "sessions_total",
+		Help:      "A counter displaying the number of all processed sessions.",
+	},
+)
+
+func IncreaseTotalSessions() {
+	enderTotalSessions.Inc()
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		enderActiveSessions,
+		enderClosedSessions,
+		enderTotalSessions,
+	}
+}
diff --git a/backend/pkg/metrics/http/metrics.go b/backend/pkg/metrics/http/metrics.go
new file mode 100644
index 000000000..7a835d7f6
--- /dev/null
+++ b/backend/pkg/metrics/http/metrics.go
@@ -0,0 +1,55 @@
+package http
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+	"strconv"
+)
+
+var httpRequestSize = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "http",
+		Name:      "request_size_bytes",
+		Help:      "A histogram displaying the size of each HTTP request in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"url", "response_code"},
+)
+
+func RecordRequestSize(size float64, url string, code int) {
+	httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
+}
+
+var httpRequestDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "http",
+		Name:      "request_duration_seconds",
+		Help:      "A histogram displaying the duration of each HTTP request in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"url", "response_code"},
+)
+
+func RecordRequestDuration(durMillis float64, url string, code int) {
+	httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var httpTotalRequests = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "http",
+		Name:      "requests_total",
+		Help:      "A counter displaying the number of all HTTP requests.",
+	},
+)
+
+func IncreaseTotalRequests() {
+	httpTotalRequests.Inc()
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		httpRequestSize,
+		httpRequestDuration,
+		httpTotalRequests,
+	}
+}
diff --git a/backend/pkg/metrics/server.go b/backend/pkg/metrics/server.go
new file mode 100644
index 000000000..fb3be5afc
--- /dev/null
+++ b/backend/pkg/metrics/server.go
@@ -0,0 +1,40 @@
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"log"
+	"net/http"
+)
+
+type MetricServer struct {
+	registry *prometheus.Registry
+}
+
+func New() *MetricServer {
+	registry := prometheus.NewRegistry()
+	// Add go runtime metrics and process collectors.
+	registry.MustRegister(
+		collectors.NewGoCollector(),
+		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+	)
+	// Expose /metrics HTTP endpoint using the created custom registry.
+	http.Handle(
+		"/metrics", promhttp.HandlerFor(
+			registry,
+			promhttp.HandlerOpts{
+				EnableOpenMetrics: true,
+			}),
+	)
+	go func() {
+		log.Println(http.ListenAndServe(":8888", nil))
+	}()
+	return &MetricServer{
+		registry: registry,
+	}
+}
+
+func (s *MetricServer) Register(cs []prometheus.Collector) {
+	s.registry.MustRegister(cs...)
+}
diff --git a/backend/pkg/metrics/sink/metrics.go b/backend/pkg/metrics/sink/metrics.go
new file mode 100644
index 000000000..52cb73ba1
--- /dev/null
+++ b/backend/pkg/metrics/sink/metrics.go
@@ -0,0 +1,185 @@
+package sink
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+)
+
+var sinkMessageSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "message_size_bytes",
+		Help:      "A histogram displaying the size of each message in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordMessageSize(size float64) {
+	sinkMessageSize.Observe(size)
+}
+
+var sinkWrittenMessages = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "messages_written",
+		Help:      "A counter displaying the total number of all written messages.",
+	},
+)
+
+func IncreaseWrittenMessages() {
+	sinkWrittenMessages.Inc()
+}
+
+var sinkTotalMessages = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "messages_total",
+		Help:      "A counter displaying the total number of all processed messages.",
+	},
+)
+
+func IncreaseTotalMessages() {
+	sinkTotalMessages.Inc()
+}
+
+var sinkBatchSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "batch_size_bytes",
+		Help:      "A histogram displaying the size of each batch in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordBatchSize(size float64) {
+	sinkBatchSize.Observe(size)
+}
+
+var sinkTotalBatches = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "batches_total",
+		Help:      "A counter displaying the total number of all written batches.",
+	},
+)
+
+func IncreaseTotalBatches() {
+	sinkTotalBatches.Inc()
+}
+
+var sinkWrittenBytes = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "written_bytes",
+		Help:      "A histogram displaying the size of the buffer in bytes written to the session file.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordWrittenBytes(size float64, fileType string) {
+	if size == 0 {
+		return
+	}
+	sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
+	IncreaseTotalWrittenBytes(size, fileType)
+}
+
+var sinkTotalWrittenBytes = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "written_bytes_total",
+		Help:      "A counter displaying the total number of bytes written to all session files.",
+	},
+	[]string{"file_type"},
+)
+
+func IncreaseTotalWrittenBytes(size float64, fileType string) {
+	if size == 0 {
+		return
+	}
+	sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
+}
+
+var sinkCachedAssets = prometheus.NewGauge(
+	prometheus.GaugeOpts{
+		Namespace: "sink",
+		Name:      "assets_cached",
+		Help:      "A gauge displaying the current number of cached assets.",
+	},
+)
+
+func IncreaseCachedAssets() {
+	sinkCachedAssets.Inc()
+}
+
+func DecreaseCachedAssets() {
+	sinkCachedAssets.Dec()
+}
+
+var sinkSkippedAssets = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "assets_skipped",
+		Help:      "A counter displaying the total number of all skipped assets.",
+	},
+)
+
+func IncreaseSkippedAssets() {
+	sinkSkippedAssets.Inc()
+}
+
+var sinkTotalAssets = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "assets_total",
+		Help:      "A counter displaying the total number of all processed assets.",
+	},
+)
+
+func IncreaseTotalAssets() {
+	sinkTotalAssets.Inc()
+}
+
+var sinkAssetSize = 
prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_size_bytes", + Help: "A histogram displaying the size of each asset in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordAssetSize(size float64) { + sinkAssetSize.Observe(size) +} + +var sinkProcessAssetDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_process_duration_seconds", + Help: "A histogram displaying the duration of processing for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordProcessAssetDuration(durMillis float64) { + sinkProcessAssetDuration.Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + sinkMessageSize, + sinkWrittenMessages, + sinkTotalMessages, + sinkBatchSize, + sinkTotalBatches, + sinkWrittenBytes, + sinkTotalWrittenBytes, + sinkCachedAssets, + sinkSkippedAssets, + sinkTotalAssets, + sinkAssetSize, + sinkProcessAssetDuration, + } +} diff --git a/backend/pkg/metrics/storage/metrics.go b/backend/pkg/metrics/storage/metrics.go new file mode 100644 index 000000000..26459c90d --- /dev/null +++ b/backend/pkg/metrics/storage/metrics.go @@ -0,0 +1,114 @@ +package storage + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var storageSessionSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "session_size_bytes", + Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSize(fileSize float64, fileType string) { + storageSessionSize.WithLabelValues(fileType).Observe(fileSize) +} + +var storageTotalSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "storage", + Name: "sessions_total", + Help: "A counter displaying the total number of all processed sessions.", + }, +) + +func IncreaseStorageTotalSessions() { + storageTotalSessions.Inc() +} + +var storageSessionReadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "read_duration_seconds", + Help: "A histogram displaying the duration of reading for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionReadDuration(durMillis float64, fileType string) { + storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionSortDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "sort_duration_seconds", + Help: "A histogram displaying the duration of sorting for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSortDuration(durMillis float64, fileType string) { + storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionEncodeDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "encode_duration_seconds", + Help: "A histogram displaying the duration of encoding for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionEncodeDuration(durMillis float64, fileType string) { + storageSessionEncodeDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionCompressDuration = 
prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "compress_duration_seconds",
+		Help:      "A histogram displaying the duration of compressing for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionCompressDuration(durMillis float64, fileType string) {
+	storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionUploadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "upload_duration_seconds",
+		Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionUploadDuration(durMillis float64, fileType string) {
+	storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		storageSessionSize,
+		storageTotalSessions,
+		storageSessionReadDuration,
+		storageSessionSortDuration,
+		storageSessionEncodeDuration,
+		storageSessionCompressDuration,
+		storageSessionUploadDuration,
+	}
+}
diff --git a/ee/backend/pkg/db/clickhouse/bulk.go b/ee/backend/pkg/db/clickhouse/bulk.go
index 706b66f68..6eb8d98fd 100644
--- a/ee/backend/pkg/db/clickhouse/bulk.go
+++ b/ee/backend/pkg/db/clickhouse/bulk.go
@@ -5,6 +5,8 @@ import (
 	"errors"
 	"fmt"
 	"log"
+	"openreplay/backend/pkg/metrics/database"
+	"time"
 
 	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
 )
@@ -16,19 +18,23 @@ type Bulk interface {
 
 type bulkImpl struct {
 	conn   driver.Conn
+	table  string
 	query  string
 	values [][]interface{}
 }
 
-func NewBulk(conn driver.Conn, query string) (Bulk, error) {
+func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
 	switch {
 	case conn == nil:
 		return nil, errors.New("clickhouse connection is empty")
+	case table == "":
+		return nil, errors.New("table is empty")
 	case query == "":
 		return nil, errors.New("query is empty")
 	}
 	return &bulkImpl{
 		conn:   conn,
+		table:  table,
 		query:  query,
 		values: make([][]interface{}, 0),
 	}, nil
@@ -40,6 +46,7 @@ func (b *bulkImpl) Append(args ...interface{}) error {
 }
 
 func (b *bulkImpl) Send() error {
+	start := time.Now()
 	batch, err := b.conn.PrepareBatch(context.Background(), b.query)
 	if err != nil {
 		return fmt.Errorf("can't create new batch: %s", err)
@@ -50,6 +57,11 @@ func (b *bulkImpl) Send() error {
 			log.Printf("failed query: %s", b.query)
 		}
 	}
+	err = batch.Send()
+	// Save bulk metrics
+	database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
+	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
+	// Prepare the values slice for new data
 	b.values = make([][]interface{}, 0)
-	return batch.Send()
+	return err
 }
diff --git a/ee/backend/pkg/db/clickhouse/connector.go b/ee/backend/pkg/db/clickhouse/connector.go
index 157d384b9..b872adcc2 100644
--- a/ee/backend/pkg/db/clickhouse/connector.go
+++ b/ee/backend/pkg/db/clickhouse/connector.go
@@ -3,18 +3,16 @@ package clickhouse
 import (
 	"errors"
 	"fmt"
+	"github.com/ClickHouse/clickhouse-go/v2"
+	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
 	"log"
 	"openreplay/backend/pkg/db/types"
 	"openreplay/backend/pkg/hashid"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/url"
-	"os"
 	"strings"
 	"time"
 
-	"github.com/ClickHouse/clickhouse-go/v2"
-	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
-
 	"openreplay/backend/pkg/license"
 )
 
@@ -52,28 +50,14 @@ type 
connectorImpl struct { finished chan struct{} } -// Check env variables. If not present, return default value. -func getEnv(key, fallback string) string { - if value, ok := os.LookupEnv(key); ok { - return value - } - return fallback -} - func NewConnector(url string) Connector { license.CheckLicense() - // Check username, password, database - userName := getEnv("CH_USERNAME", "default") - password := getEnv("CH_PASSWORD", "") - database := getEnv("CH_DATABASE", "default") url = strings.TrimPrefix(url, "tcp://") - url = strings.TrimSuffix(url, "/"+database) + url = strings.TrimSuffix(url, "/default") conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{url}, Auth: clickhouse.Auth{ - Database: database, - Username: userName, - Password: password, + Database: "default", }, MaxOpenConns: 20, MaxIdleConns: 15, @@ -99,7 +83,7 @@ func NewConnector(url string) Connector { } func (c *connectorImpl) newBatch(name, query string) error { - batch, err := NewBulk(c.conn, query) + batch, err := NewBulk(c.conn, name, query) if err != nil { return fmt.Errorf("can't create new batch: %s", err) } From 3d9ea580a29609f9e065794279915d215c857de3 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:45:00 +0100 Subject: [PATCH 057/151] fix(backend): removed wrong line from batchSet --- backend/pkg/db/postgres/batches.go | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/pkg/db/postgres/batches.go b/backend/pkg/db/postgres/batches.go index abdee36f2..8b9f2484d 100644 --- a/backend/pkg/db/postgres/batches.go +++ b/backend/pkg/db/postgres/batches.go @@ -186,7 +186,6 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { } br.Close() // returns err database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) - database.IncreaseTotalBatches() } } From b813b50e017a68e873e1a338d36dab9097dba4dc Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:56:00 +0100 Subject: [PATCH 058/151] fix(backend): upgrade /x/net library to avoid vulnerabilities --- backend/go.mod | 8 ++++---- backend/go.sum | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index 0615fb0cb..e11b839fa 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -20,14 +20,14 @@ require ( github.com/klauspost/pgzip v1.2.5 github.com/oschwald/maxminddb-golang v1.7.0 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.12.1 github.com/sethvargo/go-envconfig v0.7.0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - go.opentelemetry.io/otel v1.7.0 go.opentelemetry.io/otel/exporters/prometheus v0.30.0 go.opentelemetry.io/otel/metric v0.30.0 go.opentelemetry.io/otel/sdk/metric v0.30.0 - golang.org/x/net v0.0.0-20220906165146-f3363e06e74c + golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 google.golang.org/api v0.81.0 ) @@ -55,19 +55,19 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/paulmach/orb v0.7.1 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/stretchr/testify v1.8.0 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect go.opentelemetry.io/otel/sdk v1.7.0 // 
indirect go.opentelemetry.io/otel/trace v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect + golang.org/x/sys v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/backend/go.sum b/backend/go.sum index 5aa3ae3de..7b33d881d 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -601,8 +601,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 h1:KrLJ+iz8J6j6VVr/OCfULAcK+xozUmWE43fKpMR4MlI= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -715,8 +715,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From dab2707107c1402ec7c4a03e94dd6233d55f5b5d Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:59:57 +0100 Subject: [PATCH 059/151] feat(backend): clean up go modules --- backend/go.mod | 6 -- backend/go.sum | 13 --- backend/pkg/monitoring/metrics.go | 138 ------------------------------ 3 files changed, 157 deletions(-) delete mode 100644 backend/pkg/monitoring/metrics.go diff --git a/backend/go.mod b/backend/go.mod index e11b839fa..161513ed8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -24,9 +24,6 @@ require ( github.com/sethvargo/go-envconfig v0.7.0 github.com/tomasen/realip 
v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - go.opentelemetry.io/otel/exporters/prometheus v0.30.0 - go.opentelemetry.io/otel/metric v0.30.0 - go.opentelemetry.io/otel/sdk/metric v0.30.0 golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 google.golang.org/api v0.81.0 ) @@ -38,8 +35,6 @@ require ( cloud.google.com/go/storage v1.14.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect @@ -62,7 +57,6 @@ require ( github.com/stretchr/testify v1.8.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.7.0 // indirect - go.opentelemetry.io/otel/sdk v1.7.0 // indirect go.opentelemetry.io/otel/trace v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect diff --git a/backend/go.sum b/backend/go.sum index 7b33d881d..de6d507d3 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -80,8 +80,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.44.98 h1:fX+NxebSdO/9T6DTNOLhpC+Vv6RNkKRfsMg0a7o/yBo= github.com/aws/aws-sdk-go v1.44.98/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -156,9 +154,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -489,14 +485,6 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs= -go.opentelemetry.io/otel/metric v0.30.0 
h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= -go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= -go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= -go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME= -go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8= go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -690,7 +678,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/backend/pkg/monitoring/metrics.go b/backend/pkg/monitoring/metrics.go deleted file mode 100644 index 803fba127..000000000 --- a/backend/pkg/monitoring/metrics.go +++ /dev/null @@ -1,138 +0,0 @@ -package monitoring - -import ( - "fmt" - "log" - "net/http" - - "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -// Metrics stores all collected metrics -type Metrics struct { - meter metric.Meter - counters map[string]syncfloat64.Counter - upDownCounters map[string]syncfloat64.UpDownCounter - histograms map[string]syncfloat64.Histogram -} - -func New(name string) *Metrics { - m := &Metrics{ - counters: make(map[string]syncfloat64.Counter), - upDownCounters: make(map[string]syncfloat64.UpDownCounter), - histograms: make(map[string]syncfloat64.Histogram), - } - m.initPrometheusDataExporter() - m.initMetrics(name) - return m -} - -// initPrometheusDataExporter allows to use collected metrics in prometheus -func (m *Metrics) initPrometheusDataExporter() { - config := prometheus.Config{ - DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000}, - } - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution( - histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), - ), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - ) - exporter, err := prometheus.New(config, c) - if err != nil { - log.Panicf("failed to initialize prometheus 
exporter %v", err) - } - - global.SetMeterProvider(exporter.MeterProvider()) - - http.HandleFunc("/metrics", exporter.ServeHTTP) - go func() { - _ = http.ListenAndServe(":8888", nil) - }() - - fmt.Println("Prometheus server running on :8888") -} - -func (m *Metrics) initMetrics(name string) { - m.meter = global.Meter(name) -} - -/* -Counter is a synchronous instrument that measures additive non-decreasing values, for example, the number of: -- processed requests -- received bytes -- disk reads -*/ - -func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) { - if counter, ok := m.counters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().Counter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize counter: %v", err) - } - m.counters[name] = counter - return counter, nil -} - -func (m *Metrics) GetCounter(name string) syncfloat64.Counter { - return m.counters[name] -} - -/* -UpDownCounter is a synchronous instrument which measures additive values that increase or decrease with time, -for example, the number of: -- active requests -- open connections -- memory in use (megabytes) -*/ - -func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) { - if counter, ok := m.upDownCounters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().UpDownCounter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize upDownCounter: %v", err) - } - m.upDownCounters[name] = counter - return counter, nil -} - -func (m *Metrics) GetUpDownCounter(name string) syncfloat64.UpDownCounter { - return m.upDownCounters[name] -} - -/* -Histogram is a synchronous instrument that produces a histogram from recorded values, for example: -- request latency -- request size -*/ - -func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) { - if hist, ok := m.histograms[name]; ok { - return hist, nil - } - hist, err := m.meter.SyncFloat64().Histogram(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize histogram: %v", err) - } - m.histograms[name] = hist - return hist, nil -} - -func (m *Metrics) GetHistogram(name string) syncfloat64.Histogram { - return m.histograms[name] -} From a6864a5c8eb24d7d79e6b6e8f5f927c6b19fa1ef Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Mon, 20 Feb 2023 17:24:09 +0100 Subject: [PATCH 060/151] fix(ui): change clickmap fetch filter --- frontend/app/services/MetricService.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/services/MetricService.ts b/frontend/app/services/MetricService.ts index d8c6e099a..5b97ec4ec 100644 --- a/frontend/app/services/MetricService.ts +++ b/frontend/app/services/MetricService.ts @@ -75,7 +75,7 @@ export default class MetricService { getMetricChartData(metric: Widget, data: any, isWidget: boolean = false): Promise { if ( metric.metricType === CLICKMAP - && document.location.pathname.split('/').pop() !== 'metrics' + && document.location.pathname.split('/').pop() === 'metrics' && (document.location.pathname.indexOf('dashboard') !== -1 && document.location.pathname.indexOf('metric') === -1) ) { return Promise.resolve({}) From 11388bccec76c828aa23ea9ac21ed3765eb3b691 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:02:14 +0100 Subject: [PATCH 061/151] fix(ui) - search url unhandled filter key --- .../Filters/FilterSource/FilterSource.tsx | 2 +- frontend/app/types/filter/newFilter.js | 1 + frontend/app/utils/search.ts | 17 ++++++++++++----- 3 files 
changed, 14 insertions(+), 6 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index 08c93d8df..7ae8d3a92 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -9,7 +9,7 @@ interface Props { } function FilterSource(props: Props) { const { filter } = props; - const [value, setValue] = useState(filter.source[0] || ''); + const [value, setValue] = useState(filter.source && filter.source[0] ? filter.source[0] : ''); useEffect(() => { setValue(filter.source[0] || ''); diff --git a/frontend/app/types/filter/newFilter.js b/frontend/app/types/filter/newFilter.js index 7a612c193..286f1cc13 100644 --- a/frontend/app/types/filter/newFilter.js +++ b/frontend/app/types/filter/newFilter.js @@ -195,6 +195,7 @@ export default Record({ _filter = filtersMap[type]; } } + return { ..._filter, ...filter, diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index 017a5a7f6..d688ee369 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -58,9 +58,6 @@ const getFiltersFromEntries = (entires: any) => { let filter: any = {}; const filterKey = getFilterKeyTypeByKey(item.key); - if (!filterKey) { - return; - } const tmp = item.value.split('^'); const valueArr = tmp[0].split('|'); const operator = valueArr.shift(); @@ -78,10 +75,20 @@ const getFiltersFromEntries = (entires: any) => { } } + if (!filter) { + return + } + filter.value = valueArr; filter.operator = operator; - filter.source = sourceArr && sourceArr.length > 0 ? sourceArr : null; - filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; + if (filter.icon === "filters/metadata") { + filter.source = filter.type; + filter.type = 'metadata'; + } else { + filter.source = sourceArr && sourceArr.length > 0 ? sourceArr : null; + filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; + } + if (!filter.filters || filter.filters.size === 0) { filters.push(filter); } From de9b14c80dd0bd4145cafdbe287f6624df48108e Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:26:40 +0100 Subject: [PATCH 062/151] fix(ui) - search url unhandled filter key --- .../app/components/shared/Filters/FilterSource/FilterSource.tsx | 2 +- frontend/app/utils/search.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index 7ae8d3a92..07ca61ec3 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -12,7 +12,7 @@ function FilterSource(props: Props) { const [value, setValue] = useState(filter.source && filter.source[0] ? filter.source[0] : ''); useEffect(() => { - setValue(filter.source[0] || ''); + setValue(filter.source && filter.source[0] ? 
filter.source[0] : ''); }, [filter]); const write = ({ target: { value, name } }: any) => setValue(value); diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index d688ee369..4b32f8d13 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -13,7 +13,7 @@ export const createUrlQuery = (filter: any) => { let str = `${f.operator}|${f.value.join('|')}`; if (f.hasSource) { - str = `${str}^${f.sourceOperator}|${f.source.join('|')}`; + str = `${str}^${f.sourceOperator ? f.sourceOperator : ''}|${f.source ? f.source.join('|') : ''}`; } let key: any = setQueryParamKeyFromFilterkey(f.key); From 2162fd0f4d98574e346cea31160a444758ca796c Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:27:45 +0100 Subject: [PATCH 063/151] fix(ui) - modal scroll issue --- frontend/app/components/ui/Modal/Modal.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frontend/app/components/ui/Modal/Modal.tsx b/frontend/app/components/ui/Modal/Modal.tsx index 89ba9f5d9..c489aa216 100644 --- a/frontend/app/components/ui/Modal/Modal.tsx +++ b/frontend/app/components/ui/Modal/Modal.tsx @@ -13,7 +13,8 @@ function Modal(props: Props) { useEffect(() => { if (open) { document.body.style.overflow = 'hidden'; - } else { + } + return () => { document.body.style.overflow = 'auto'; } }, [open]); From 915399855a76d1e99efe93bdb7b223134aedf815 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:37:39 +0100 Subject: [PATCH 064/151] fix(ui) - card sessions pagination reset --- .../Dashboard/components/WidgetSessions/WidgetSessions.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index 4052e7a7e..f563d688e 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -94,7 +94,7 @@ function WidgetSessions(props: Props) { useEffect(() => { metricStore.updateKey('sessionsPage', 1); loadData(); - }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch]); + }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch, activeSeries]); useEffect(loadData, [metricStore.sessionsPage]); return ( From 12fb774bd72109bef6f09d5a654855f29f14b321 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:46:52 +0100 Subject: [PATCH 065/151] fix(ui) - filters z-index that causing depth issue --- .../Filters/FilterValueDropdown/FilterValueDropdown.module.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css b/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css index b0ca01016..6e34010b3 100644 --- a/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css +++ b/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css @@ -6,7 +6,7 @@ align-items: center; height: 26px; width: 100%; - z-index: 3; + /* z-index: 3; TODO this has to be fixed in clickmaps @Nikita */ & .right { height: 24px; From d7ec5a81b258c25d040ce3b9940dbcdb2a621ee2 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 10:33:11 +0100 Subject: [PATCH 066/151] fix(player): dont load 
devtools for clickmaps, fix scrolling overflow --- frontend/app/player/web/MessageManager.ts | 5 +++-- frontend/app/player/web/Screen/Screen.ts | 7 +++---- frontend/app/player/web/WebPlayer.ts | 2 +- frontend/app/player/web/addons/TargetMarker.ts | 2 -- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/frontend/app/player/web/MessageManager.ts b/frontend/app/player/web/MessageManager.ts index d0ae18020..68ef0cbf8 100644 --- a/frontend/app/player/web/MessageManager.ts +++ b/frontend/app/player/web/MessageManager.ts @@ -193,9 +193,9 @@ export default class MessageManager { // this.state.update({ filesLoaded: true }) } - async loadMessages() { + async loadMessages(isClickmap: boolean = false) { this.setMessagesLoading(true) - // TODO: reuseable decryptor instance + // TODO: reusable decryptor instance const createNewParser = (shouldDecrypt = true) => { const decrypt = shouldDecrypt && this.session.fileKey ? (b: Uint8Array) => decryptSessionBytes(b, this.session.fileKey) @@ -233,6 +233,7 @@ export default class MessageManager { .finally(this.onFileReadFinally); // load devtools (TODO: start after the first DOM file download) + if (isClickmap) return; this.state.update({ devtoolsLoading: true }) loadFiles(this.session.devtoolsURL, createNewParser()) // EFS fallback diff --git a/frontend/app/player/web/Screen/Screen.ts b/frontend/app/player/web/Screen/Screen.ts index b095385b1..cca56d402 100644 --- a/frontend/app/player/web/Screen/Screen.ts +++ b/frontend/app/player/web/Screen/Screen.ts @@ -213,11 +213,12 @@ export default class Screen { case ScaleMode.Embed: this.scaleRatio = Math.min(offsetWidth / width, offsetHeight / height) translate = "translate(-50%, -50%)" + posStyles = { height: height + 'px' } break; case ScaleMode.AdjustParentHeight: this.scaleRatio = offsetWidth / width translate = "translate(-50%, 0)" - posStyles = { top: 0 } + posStyles = { top: 0, height: this.document!.documentElement.getBoundingClientRect().height + 'px', } break; } @@ -232,13 +233,11 @@ export default class Screen { } Object.assign(this.screen.style, posStyles, { - height: height + 'px', width: width + 'px', transform: `scale(${this.scaleRatio}) ${translate}`, }) - Object.assign(this.iframe.style, { + Object.assign(this.iframe.style, posStyles, { width: width + 'px', - height: height + 'px', }) this.boundingRect = this.overlay.getBoundingClientRect(); diff --git a/frontend/app/player/web/WebPlayer.ts b/frontend/app/player/web/WebPlayer.ts index c4da835ff..d94d10beb 100644 --- a/frontend/app/player/web/WebPlayer.ts +++ b/frontend/app/player/web/WebPlayer.ts @@ -46,7 +46,7 @@ export default class WebPlayer extends Player { this.screen = screen this.messageManager = messageManager if (!live) { // hack. 
TODO: split OfflinePlayer class - messageManager.loadMessages() + void messageManager.loadMessages(isClickMap) } this.targetMarker = new TargetMarker(this.screen, wpState) diff --git a/frontend/app/player/web/addons/TargetMarker.ts b/frontend/app/player/web/addons/TargetMarker.ts index c9315f01b..6629ceaec 100644 --- a/frontend/app/player/web/addons/TargetMarker.ts +++ b/frontend/app/player/web/addons/TargetMarker.ts @@ -161,7 +161,6 @@ export default class TargetMarker { const scaleRatio = this.screen.getScale() Object.assign(overlay.style, clickmapStyles.overlayStyle({ height: iframeSize.height, width: iframeSize.width, scale: scaleRatio })) - console.log(selections) this.clickMapOverlay = overlay selections.forEach((s, i) => { const el = this.screen.getElementBySelector(s.selector); @@ -189,7 +188,6 @@ export default class TargetMarker { const border = document.createElement("div") - let key = 0 if (width > 50) { From ec382279b8228be24f93f4d2f6b3239dcb96937a Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 10:30:54 +0000 Subject: [PATCH 067/151] Updating parallel script --- scripts/helmcharts/build_deploy_parallel.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/scripts/helmcharts/build_deploy_parallel.sh b/scripts/helmcharts/build_deploy_parallel.sh index 38c1633bb..268811a34 100644 --- a/scripts/helmcharts/build_deploy_parallel.sh +++ b/scripts/helmcharts/build_deploy_parallel.sh @@ -8,6 +8,12 @@ set -e # Removing local alpine:latest image docker rmi alpine || true +# Signing image +# cosign sign --key awskms:///alias/openreplay-container-sign image_url:tag +export SIGN_IMAGE=1 +export PUSH_IMAGE=1 +export AWS_DEFAULT_REGION="eu-central-1" +export SIGN_KEY="awskms:///alias/openreplay-container-sign" echo $DOCKER_REPO [[ -z DOCKER_REPO ]] && { echo Set DOCKER_REPO="your docker registry" @@ -22,9 +28,9 @@ echo $DOCKER_REPO tmux split-window "cd ../../frontend && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" tmux select-layout tiled tmux split-window "cd ../../sourcemap-reader && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_alerts.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_crons.sh $@" + tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@ + && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_alerts.sh $@ + && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_crons.sh $@" tmux select-layout tiled } From 92674c3e6df95e1c25d50affd701c4e23201c739 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 11:32:54 +0100 Subject: [PATCH 068/151] chore(build): ignoring ee folder for sourcemap-reader build Signed-off-by: rjshrjndrn --- sourcemap-reader/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sourcemap-reader/build.sh b/sourcemap-reader/build.sh index 859347fd4..fbe8762e2 100644 --- a/sourcemap-reader/build.sh +++ b/sourcemap-reader/build.sh @@ -34,7 +34,7 @@ function build_api(){ tag="" # Copy enterprise code [[ $1 == "ee" ]] && { - cp -rf ../ee/sourcemap-reader/* ./ + cp -rf ../ee/sourcemap-reader/* ./ || true # We share same codebase for ee/foss 
envarg="default-ee" tag="ee-" } From 967134c280f15eb4a65781f9af8d15bb783e2e5a Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Mon, 20 Feb 2023 15:30:45 +0100 Subject: [PATCH 069/151] fix(ui): keep share message after sharing --- frontend/app/components/shared/SharePopup/SharePopup.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/app/components/shared/SharePopup/SharePopup.js b/frontend/app/components/shared/SharePopup/SharePopup.js index 984ce0060..07726d14a 100644 --- a/frontend/app/components/shared/SharePopup/SharePopup.js +++ b/frontend/app/components/shared/SharePopup/SharePopup.js @@ -80,8 +80,8 @@ export default class SharePopup extends React.PureComponent { handleSuccess = (endpoint) => { const obj = endpoint === 'Slack' - ? { isOpen: false, comment: '', loadingSlack: false } - : { isOpen: false, comment: '', loadingTeams: false }; + ? { loadingSlack: false } + : { loadingTeams: false }; this.setState(obj); toast.success(`Sent to ${endpoint}.`); }; @@ -109,7 +109,7 @@ export default class SharePopup extends React.PureComponent { return ( this.setState({ isOpen: true })} - onClose={() => this.setState({ isOpen: false })} + onClose={() => this.setState({ isOpen: false, comment: '' })} render={() => (
{this.state.loadingTeams || this.state.loadingSlack ? ( From 022cb65314a5cde160a6ee261d5cecf725c2da08 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 16:37:19 +0100 Subject: [PATCH 070/151] fix(ui) - cards list filter by dashboard --- .../components/Dashboard/components/MetricsList/MetricsList.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx b/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx index f2639d37f..1b9f7dfc9 100644 --- a/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx +++ b/frontend/app/components/Dashboard/components/MetricsList/MetricsList.tsx @@ -21,7 +21,7 @@ function MetricsList({ const dashboard = dashboardStore.selectedDashboard; const existingCardIds = useMemo(() => dashboard?.widgets?.map(i => parseInt(i.metricId)), [dashboard]); - const cards = useMemo(() => metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))), [metricStore.filteredCards]); + const cards = useMemo(() => !!onSelectionChange ? metricStore.filteredCards.filter(i => !existingCardIds?.includes(parseInt(i.metricId))) : metricStore.filteredCards, [metricStore.filteredCards]); useEffect(() => { metricStore.fetchList(); From 7a7910c4fd9f2f2b9d2e68a9ba487246a073ab01 Mon Sep 17 00:00:00 2001 From: Alexander Date: Mon, 20 Feb 2023 16:37:55 +0100 Subject: [PATCH 071/151] Draft: New metrics module (#982) * feat(backend): created new metrics module --- backend/cmd/assets/main.go | 19 +- backend/cmd/db/main.go | 13 +- backend/cmd/ender/main.go | 17 +- backend/cmd/http/main.go | 23 ++- backend/cmd/integrations/main.go | 18 +- backend/cmd/sink/main.go | 36 ++-- backend/cmd/storage/main.go | 15 +- backend/internal/assets/cacher/cacher.go | 47 ++--- backend/internal/http/router/handlers-ios.go | 37 ++-- backend/internal/http/router/handlers-web.go | 65 +++---- backend/internal/http/router/handlers.go | 6 +- backend/internal/http/router/response.go | 33 +++- backend/internal/http/router/router.go | 44 +---- backend/internal/sessionender/ender.go | 45 ++--- backend/internal/sink/assetscache/assets.go | 82 +++----- backend/internal/storage/storage.go | 143 ++++---------- backend/pkg/db/postgres/batches.go | 65 ++----- backend/pkg/db/postgres/bulk.go | 55 ++---- backend/pkg/db/postgres/bulks.go | 33 ++-- backend/pkg/db/postgres/connector.go | 49 +---- backend/pkg/db/postgres/pool.go | 73 +++----- backend/pkg/messages/iterator-sink.go | 3 + backend/pkg/messages/iterator.go | 5 +- backend/pkg/metrics/assets/metrics.go | 72 ++++++++ backend/pkg/metrics/common/metrics.go | 11 ++ backend/pkg/metrics/database/metrics.go | 127 +++++++++++++ backend/pkg/metrics/ender/metrics.go | 51 +++++ backend/pkg/metrics/http/metrics.go | 55 ++++++ backend/pkg/metrics/server.go | 40 ++++ backend/pkg/metrics/sink/metrics.go | 185 +++++++++++++++++++ backend/pkg/metrics/storage/metrics.go | 114 ++++++++++++ ee/backend/pkg/db/clickhouse/bulk.go | 16 +- ee/backend/pkg/db/clickhouse/connector.go | 26 +-- 33 files changed, 1021 insertions(+), 602 deletions(-) create mode 100644 backend/pkg/metrics/assets/metrics.go create mode 100644 backend/pkg/metrics/common/metrics.go create mode 100644 backend/pkg/metrics/database/metrics.go create mode 100644 backend/pkg/metrics/ender/metrics.go create mode 100644 backend/pkg/metrics/http/metrics.go create mode 100644 backend/pkg/metrics/server.go create mode 100644 backend/pkg/metrics/sink/metrics.go create mode 100644 
backend/pkg/metrics/storage/metrics.go diff --git a/backend/cmd/assets/main.go b/backend/cmd/assets/main.go index b41dedd87..b05ecbe52 100644 --- a/backend/cmd/assets/main.go +++ b/backend/cmd/assets/main.go @@ -1,9 +1,7 @@ package main import ( - "context" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -13,12 +11,16 @@ import ( "openreplay/backend/internal/assets/cacher" config "openreplay/backend/internal/config/assets" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + assetsMetrics "openreplay/backend/pkg/metrics/assets" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("assets") + m := metrics.New() + m.Register(assetsMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,18 +28,13 @@ func main() { pprof.StartProfilingServer() } - cacher := cacher.NewCacher(cfg, metrics) - - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } + cacher := cacher.NewCacher(cfg) msgHandler := func(msg messages.Message) { switch m := msg.(type) { case *messages.AssetCache: cacher.CacheURL(m.SessionID(), m.URL) - totalAssets.Add(context.Background(), 1) + assetsMetrics.IncreaseProcessesSessions() // TODO: connect to "raw" topic in order to listen for JSException case *messages.JSException: sourceList, err := assets.ExtractJSExceptionSources(&m.Payload) diff --git a/backend/cmd/db/main.go b/backend/cmd/db/main.go index f9440a908..84b0d81ed 100644 --- a/backend/cmd/db/main.go +++ b/backend/cmd/db/main.go @@ -3,8 +3,6 @@ package main import ( "errors" "log" - types2 "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -14,16 +12,21 @@ import ( "openreplay/backend/internal/db/datasaver" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + types2 "openreplay/backend/pkg/db/types" "openreplay/backend/pkg/handlers" custom2 "openreplay/backend/pkg/handlers/custom" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/sessions" ) func main() { - metrics := monitoring.New("db") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := db.New() @@ -33,7 +36,7 @@ func main() { // Init database pg := cache.NewPGCache( - postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs) + postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs) defer pg.Close() // HandlersFabric returns the list of message handlers we want to be applied to each incoming message. 
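Every service in this patch now follows the same wiring: metrics.New() brings up a Prometheus scrape endpoint and Register() attaches the collectors returned by a package's List(). The new backend/pkg/metrics/server.go itself is not included in this excerpt, so the following is only a minimal sketch of what such a registry wrapper could look like, assuming the Register([]prometheus.Collector) shape seen at the call sites above and the :8888 port that the removed monitoring package listened on; the real implementation may differ.

    package metrics

    import (
    	"log"
    	"net/http"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // Server holds a dedicated registry so each service exposes only its own collectors.
    type Server struct {
    	registry *prometheus.Registry
    }

    // New creates the registry and serves it on /metrics in the background.
    func New() *Server {
    	s := &Server{registry: prometheus.NewRegistry()}
    	go func() {
    		mux := http.NewServeMux()
    		mux.Handle("/metrics", promhttp.HandlerFor(s.registry, promhttp.HandlerOpts{}))
    		// Port assumed from the monitoring package this series deletes.
    		if err := http.ListenAndServe(":8888", mux); err != nil {
    			log.Printf("metrics server error: %s", err)
    		}
    	}()
    	return s
    }

    // Register adds a package's collectors, e.g. m.Register(databaseMetrics.List()).
    func (s *Server) Register(collectors []prometheus.Collector) {
    	for _, collector := range collectors {
    		if err := s.registry.Register(collector); err != nil {
    			log.Printf("can't register collector: %s", err)
    		}
    	}
    }
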
diff --git a/backend/cmd/ender/main.go b/backend/cmd/ender/main.go index 74b0b8bd2..da7ca9b89 100644 --- a/backend/cmd/ender/main.go +++ b/backend/cmd/ender/main.go @@ -2,8 +2,6 @@ package main import ( "log" - "openreplay/backend/internal/storage" - "openreplay/backend/pkg/pprof" "os" "os/signal" "strings" @@ -12,16 +10,23 @@ import ( "openreplay/backend/internal/config/ender" "openreplay/backend/internal/sessionender" + "openreplay/backend/internal/storage" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + enderMetrics "openreplay/backend/pkg/metrics/ender" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("ender") + m := metrics.New() + m.Register(enderMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := ender.New() @@ -29,10 +34,10 @@ func main() { pprof.StartProfilingServer() } - pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs) + pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs) defer pg.Close() - sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) + sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) if err != nil { log.Printf("can't init ender service: %s", err) return diff --git a/backend/cmd/http/main.go b/backend/cmd/http/main.go index 4fb82b635..83eedaf29 100644 --- a/backend/cmd/http/main.go +++ b/backend/cmd/http/main.go @@ -2,23 +2,28 @@ package main import ( "log" - "openreplay/backend/internal/config/http" - "openreplay/backend/internal/http/router" - "openreplay/backend/internal/http/server" - "openreplay/backend/internal/http/services" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" + "openreplay/backend/internal/config/http" + "openreplay/backend/internal/http/router" + "openreplay/backend/internal/http/server" + "openreplay/backend/internal/http/services" "openreplay/backend/pkg/db/cache" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + httpMetrics "openreplay/backend/pkg/metrics/http" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" ) func main() { - metrics := monitoring.New("http") + m := metrics.New() + m.Register(httpMetrics.List()) + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := http.New() @@ -31,14 +36,14 @@ func main() { defer producer.Close(15000) // Connect to database - dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20) + dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20) defer dbConn.Close() // Build all services services := services.New(cfg, producer, dbConn) // Init server's routes - router, err := router.NewRouter(cfg, services, metrics) + router, err := router.NewRouter(cfg, services) if err != nil { log.Fatalf("failed while creating engine: %s", err) } diff --git a/backend/cmd/integrations/main.go b/backend/cmd/integrations/main.go index 8c6d56966..3fa07ee9c 100644 --- a/backend/cmd/integrations/main.go +++ 
b/backend/cmd/integrations/main.go @@ -2,24 +2,26 @@ package main import ( "log" - config "openreplay/backend/internal/config/integrations" - "openreplay/backend/internal/integrations/clientManager" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/pprof" - "time" - "os" "os/signal" "syscall" + "time" + config "openreplay/backend/internal/config/integrations" + "openreplay/backend/internal/integrations/clientManager" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/intervals" + "openreplay/backend/pkg/metrics" + databaseMetrics "openreplay/backend/pkg/metrics/database" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/token" ) func main() { - metrics := monitoring.New("integrations") + m := metrics.New() + m.Register(databaseMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -27,7 +29,7 @@ func main() { pprof.StartProfilingServer() } - pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics) + pg := postgres.NewConn(cfg.Postgres.String(), 0, 0) defer pg.Close() tokenizer := token.NewTokenizer(cfg.TokenSecret) diff --git a/backend/cmd/sink/main.go b/backend/cmd/sink/main.go index 74e0b1db1..4bbaeeee4 100644 --- a/backend/cmd/sink/main.go +++ b/backend/cmd/sink/main.go @@ -2,10 +2,8 @@ package main import ( "bytes" - "context" "encoding/binary" "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -16,13 +14,16 @@ import ( "openreplay/backend/internal/sink/sessionwriter" "openreplay/backend/internal/storage" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + sinkMetrics "openreplay/backend/pkg/metrics/sink" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/url/assets" ) func main() { - metrics := monitoring.New("sink") + m := metrics.New() + m.Register(sinkMetrics.List()) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := sink.New() @@ -39,22 +40,8 @@ func main() { producer := queue.NewProducer(cfg.MessageSizeLimit, true) defer producer.Close(cfg.ProducerCloseTimeout) rewriter := assets.NewRewriter(cfg.AssetsOrigin) - assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics) - + assetMessageHandler := assetscache.New(cfg, rewriter, producer) counter := storage.NewLogCounter() - // Session message metrics - totalMessages, err := metrics.RegisterCounter("messages_total") - if err != nil { - log.Printf("can't create messages_total metric: %s", err) - } - savedMessages, err := metrics.RegisterCounter("messages_saved") - if err != nil { - log.Printf("can't create messages_saved metric: %s", err) - } - messageSize, err := metrics.RegisterHistogram("messages_size") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } var ( sessionID uint64 @@ -74,11 +61,12 @@ func main() { if domBuffer.Len() <= 0 && devBuffer.Len() <= 0 { return } + sinkMetrics.RecordWrittenBytes(float64(domBuffer.Len()), "dom") + sinkMetrics.RecordWrittenBytes(float64(devBuffer.Len()), "devtools") // Write buffered batches to the session if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil { log.Printf("writer error: %s", err) - return } // Prepare buffer for the next batch @@ -88,8 +76,7 @@ func main() { return } - // [METRICS] Increase the number of processed messages - totalMessages.Add(context.Background(), 1) + sinkMetrics.IncreaseTotalMessages() // Send SessionEnd trigger to storage service if msg.TypeID() == 
messages.MsgSessionEnd { @@ -187,9 +174,8 @@ func main() { } } - // [METRICS] Increase the number of written to the files messages and the message size - messageSize.Record(context.Background(), float64(len(msg.Encode()))) - savedMessages.Add(context.Background(), 1) + sinkMetrics.IncreaseWrittenMessages() + sinkMetrics.RecordMessageSize(float64(len(msg.Encode()))) } consumer := queue.NewConsumer( diff --git a/backend/cmd/storage/main.go b/backend/cmd/storage/main.go index dcb1b53ed..472324b95 100644 --- a/backend/cmd/storage/main.go +++ b/backend/cmd/storage/main.go @@ -2,7 +2,6 @@ package main import ( "log" - "openreplay/backend/pkg/pprof" "os" "os/signal" "syscall" @@ -12,13 +11,17 @@ import ( "openreplay/backend/internal/storage" "openreplay/backend/pkg/failover" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics" + storageMetrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/pprof" "openreplay/backend/pkg/queue" - s3storage "openreplay/backend/pkg/storage" + cloud "openreplay/backend/pkg/storage" ) func main() { - metrics := monitoring.New("storage") + m := metrics.New() + m.Register(storageMetrics.List()) + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) cfg := config.New() @@ -26,8 +29,8 @@ func main() { pprof.StartProfilingServer() } - s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket) - srv, err := storage.New(cfg, s3, metrics) + s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket) + srv, err := storage.New(cfg, s3) if err != nil { log.Printf("can't init storage service: %s", err) return diff --git a/backend/internal/assets/cacher/cacher.go b/backend/internal/assets/cacher/cacher.go index 8bbee092f..4b0353a9a 100644 --- a/backend/internal/assets/cacher/cacher.go +++ b/backend/internal/assets/cacher/cacher.go @@ -1,16 +1,13 @@ package cacher import ( - "context" "crypto/tls" "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "io/ioutil" - "log" "mime" "net/http" - "openreplay/backend/pkg/monitoring" + metrics "openreplay/backend/pkg/metrics/assets" "path/filepath" "strings" "time" @@ -25,30 +22,22 @@ import ( const MAX_CACHE_DEPTH = 5 type cacher struct { - timeoutMap *timeoutMap // Concurrency implemented - s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." - httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." - rewriter *assets.Rewriter // Read only - Errors chan error - sizeLimit int - downloadedAssets syncfloat64.Counter - requestHeaders map[string]string - workers *WorkerPool + timeoutMap *timeoutMap // Concurrency implemented + s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently." + httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." 
+ rewriter *assets.Rewriter // Read only + Errors chan error + sizeLimit int + requestHeaders map[string]string + workers *WorkerPool } func (c *cacher) CanCache() bool { return c.workers.CanAddTask() } -func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { +func NewCacher(cfg *config.Config) *cacher { rewriter := assets.NewRewriter(cfg.AssetsOrigin) - if metrics == nil { - log.Fatalf("metrics are empty") - } - downloadedAssets, err := metrics.RegisterCounter("assets_downloaded") - if err != nil { - log.Printf("can't create downloaded_assets metric: %s", err) - } c := &cacher{ timeoutMap: newTimeoutMap(), s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets), @@ -59,11 +48,10 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher { TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, }, - rewriter: rewriter, - Errors: make(chan error), - sizeLimit: cfg.AssetsSizeLimit, - downloadedAssets: downloadedAssets, - requestHeaders: cfg.AssetsRequestHeaders, + rewriter: rewriter, + Errors: make(chan error), + sizeLimit: cfg.AssetsSizeLimit, + requestHeaders: cfg.AssetsRequestHeaders, } c.workers = NewPool(64, c.CacheFile) return c @@ -75,6 +63,7 @@ func (c *cacher) CacheFile(task *Task) { func (c *cacher) cacheURL(t *Task) { t.retries-- + start := time.Now() req, _ := http.NewRequest("GET", t.requestURL, nil) if t.retries%2 == 0 { req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0") @@ -87,6 +76,7 @@ func (c *cacher) cacheURL(t *Task) { c.Errors <- errors.Wrap(err, t.urlContext) return } + metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode) defer res.Body.Close() if res.StatusCode >= 400 { printErr := true @@ -122,12 +112,15 @@ func (c *cacher) cacheURL(t *Task) { } // TODO: implement in streams + start = time.Now() err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false) if err != nil { + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true) c.Errors <- errors.Wrap(err, t.urlContext) return } - c.downloadedAssets.Add(context.Background(), 1) + metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false) + metrics.IncreaseSavedSessions() if isCSS { if t.depth > 0 { diff --git a/backend/internal/http/router/handlers-ios.go b/backend/internal/http/router/handlers-ios.go index e0fc73b6f..b11918d54 100644 --- a/backend/internal/http/router/handlers-ios.go +++ b/backend/internal/http/router/handlers-ios.go @@ -22,28 +22,28 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) req := &StartIOSSessionRequest{} if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit) defer body.Close() if err := json.NewDecoder(body).Decode(req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0) return } if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, 
errors.New("Project doesn't exist or is not active")) + ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0) } else { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging } return } @@ -53,18 +53,18 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) if err != nil { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0) return } sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli())) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) return } // TODO: if EXPIRED => send message for two sessions association @@ -94,22 +94,24 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) UserUUID: userUUID, SessionID: strconv.FormatUint(tokenData.ID, 10), BeaconSizeLimit: e.cfg.BeaconSizeLimit, - }) + }, startTime, r.URL.Path, 0) } func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS) } func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil && err != token.EXPIRED { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } // Check timestamps here? @@ -117,16 +119,17 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque } func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() log.Printf("recieved imagerequest") sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { // Should accept expired token? 
- ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0) return } if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0) return } r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit) @@ -134,21 +137,21 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) err = r.ParseMultipartForm(1e6) // ~1Mb if err == http.ErrNotMultipart || err == http.ErrMissingBoundary { - ResponseWithError(w, http.StatusUnsupportedMediaType, err) + ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0) return // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB } else if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging return } if r.MultipartForm == nil { - ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed")) + ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0) return } if len(r.MultipartForm.Value["projectKey"]) == 0 { - ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter? + ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter? return } diff --git a/backend/internal/http/router/handlers-web.go b/backend/internal/http/router/handlers-web.go index 7afd184e5..52a37b7f0 100644 --- a/backend/internal/http/router/handlers-web.go +++ b/backend/internal/http/router/handlers-web.go @@ -3,18 +3,17 @@ package router import ( "encoding/json" "errors" - "github.com/Masterminds/semver" - "go.opentelemetry.io/otel/attribute" "io" "log" "math/rand" "net/http" - "openreplay/backend/internal/http/uuid" - "openreplay/backend/pkg/flakeid" "strconv" "time" + "github.com/Masterminds/semver" + "openreplay/backend/internal/http/uuid" "openreplay/backend/pkg/db/postgres" + "openreplay/backend/pkg/flakeid" . 
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/token" ) @@ -28,13 +27,6 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ( if err != nil { return nil, err } - - reqSize := len(bodyBytes) - e.requestSize.Record( - r.Context(), - float64(reqSize), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) return bodyBytes, nil } @@ -56,40 +48,43 @@ func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { startTime := time.Now() + bodySize := 0 // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &StartSessionRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize) return } p, err := e.services.Database.GetProjectByKey(*req.ProjectKey) if err != nil { if postgres.IsNoRowsErr(err) { - ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached")) + ResponseWithError(w, http.StatusNotFound, + errors.New("project doesn't exist or capture limit has been reached"), startTime, r.URL.Path, bodySize) } else { log.Printf("can't get project by key: %s", err) - ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key")) + ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize) } return } @@ -99,19 +94,19 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) if err != nil || req.Reset { // Starting the new one dice := byte(rand.Intn(100)) // [0, 100) if dice >= p.SampleRate { - ResponseWithError(w, http.StatusForbidden, errors.New("cancel")) + ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } startTimeMili := startTime.UnixMilli() sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili)) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) + ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize) return } // TODO: if EXPIRED => send message for two sessions association @@ -163,29 +158,33 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) BeaconSizeLimit: e.getBeaconSize(tokenData.ID), StartTimestamp: 
int64(flakeid.ExtractTimestamp(tokenData.ID)), Delay: tokenData.Delay, - }) + }, startTime, r.URL.Path, bodySize) } func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check authorization sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r) if err != nil { - ResponseWithError(w, http.StatusUnauthorized, err) + ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize) return } // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID)) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Send processed messages to queue as array of bytes // TODO: check bytes for nonsense crap @@ -194,39 +193,43 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) log.Printf("can't send processed messages to queue: %s", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bodySize := 0 + // Check request body if r.Body == nil { - ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty")) + ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize) return } bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit) if err != nil { log.Printf("error while reading request body: %s", err) - ResponseWithError(w, http.StatusRequestEntityTooLarge, err) + ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize) return } + bodySize = len(bodyBytes) // Parse request body req := &NotStartedRequest{} if err := json.Unmarshal(bodyBytes, req); err != nil { - ResponseWithError(w, http.StatusBadRequest, err) + ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize) return } // Handler's logic if req.ProjectKey == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required")) + ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize) return } ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway if ua == nil { - ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) + ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize) return } country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r) @@ -248,5 +251,5 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { log.Printf("Unable to insert Unstarted Session: %v\n", err) } - w.WriteHeader(http.StatusOK) + ResponseOK(w, startTime, r.URL.Path, bodySize) } diff --git a/backend/internal/http/router/handlers.go b/backend/internal/http/router/handlers.go index c36fdd668..425177341 100644 --- a/backend/internal/http/router/handlers.go +++ b/backend/internal/http/router/handlers.go @@ -6,9 +6,11 @@ import ( "io/ioutil" "log" "net/http" + "time" ) func (e *Router) pushMessages(w 
http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) { + start := time.Now() body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit) defer body.Close() @@ -21,7 +23,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID reader, err = gzip.NewReader(body) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response return } //log.Println("Gzip reader init", reader) @@ -32,7 +34,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID //log.Println("Reader after switch:", reader) buf, err := ioutil.ReadAll(reader) if err != nil { - ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging + ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging return } e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send? diff --git a/backend/internal/http/router/response.go b/backend/internal/http/router/response.go index 0b4725419..b66b7c563 100644 --- a/backend/internal/http/router/response.go +++ b/backend/internal/http/router/response.go @@ -4,21 +4,44 @@ import ( "encoding/json" "log" "net/http" + "time" + + metrics "openreplay/backend/pkg/metrics/http" ) -func ResponseWithJSON(w http.ResponseWriter, res interface{}) { +func recordMetrics(requestStart time.Time, url string, code, bodySize int) { + if bodySize > 0 { + metrics.RecordRequestSize(float64(bodySize), url, code) + } + metrics.IncreaseTotalRequests() + metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code) +} + +func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) { + w.WriteHeader(http.StatusOK) + recordMetrics(requestStart, url, http.StatusOK, bodySize) +} + +func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) { body, err := json.Marshal(res) if err != nil { log.Println(err) } w.Header().Set("Content-Type", "application/json") w.Write(body) + recordMetrics(requestStart, url, http.StatusOK, bodySize) } -func ResponseWithError(w http.ResponseWriter, code int, err error) { - type response struct { - Error string `json:"error"` +type response struct { + Error string `json:"error"` +} + +func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) { + body, err := json.Marshal(&response{err.Error()}) + if err != nil { + log.Println(err) } w.WriteHeader(code) - ResponseWithJSON(w, &response{err.Error()}) + w.Write(body) + recordMetrics(requestStart, url, code, bodySize) } diff --git a/backend/internal/http/router/router.go b/backend/internal/http/router/router.go index 964016dfd..6cd7efe79 100644 --- a/backend/internal/http/router/router.go +++ b/backend/internal/http/router/router.go @@ -1,19 +1,16 @@ package router import ( - "context" "fmt" - "github.com/gorilla/mux" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" "net/http" + "sync" + "time" + + "github.com/gorilla/mux" http3 "openreplay/backend/internal/config/http" http2 "openreplay/backend/internal/http/services" "openreplay/backend/internal/http/util" - "openreplay/backend/pkg/monitoring" - "sync" - "time" ) type BeaconSize struct { @@ -25,21 +22,16 @@ type Router 
struct { router *mux.Router cfg *http3.Config services *http2.ServicesBuilder - requestSize syncfloat64.Histogram - requestDuration syncfloat64.Histogram - totalRequests syncfloat64.Counter mutex *sync.RWMutex beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize } -func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) { +func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case services == nil: return nil, fmt.Errorf("services is empty") - case metrics == nil: - return nil, fmt.Errorf("metrics is empty") } e := &Router{ cfg: cfg, @@ -47,7 +39,6 @@ func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *moni mutex: &sync.RWMutex{}, beaconSizeCache: make(map[uint64]*BeaconSize), } - e.initMetrics(metrics) e.init() go e.clearBeaconSizes() return e, nil @@ -115,22 +106,6 @@ func (e *Router) init() { e.router.Use(e.corsMiddleware) } -func (e *Router) initMetrics(metrics *monitoring.Metrics) { - var err error - e.requestSize, err = metrics.RegisterHistogram("requests_body_size") - if err != nil { - log.Printf("can't create requests_body_size metric: %s", err) - } - e.requestDuration, err = metrics.RegisterHistogram("requests_duration") - if err != nil { - log.Printf("can't create requests_duration metric: %s", err) - } - e.totalRequests, err = metrics.RegisterCounter("requests_total") - if err != nil { - log.Printf("can't create requests_total metric: %s", err) - } -} - func (e *Router) root(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } @@ -149,17 +124,8 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler { log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path)) - requestStart := time.Now() - // Serve request next.ServeHTTP(w, r) - - metricsContext, _ := context.WithTimeout(context.Background(), time.Millisecond*100) - e.totalRequests.Add(metricsContext, 1) - e.requestDuration.Record(metricsContext, - float64(time.Now().Sub(requestStart).Milliseconds()), - []attribute.KeyValue{attribute.String("method", r.URL.Path)}..., - ) }) } diff --git a/backend/internal/sessionender/ender.go b/backend/internal/sessionender/ender.go index c1c2c9b7f..e1ddb0ffe 100644 --- a/backend/internal/sessionender/ender.go +++ b/backend/internal/sessionender/ender.go @@ -1,13 +1,11 @@ package sessionender import ( - "context" - "fmt" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" "time" + + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/metrics/ender" ) // EndedSessionHandler handler for ended sessions @@ -23,32 +21,16 @@ type session struct { // SessionEnder updates timestamp of last message for each session type SessionEnder struct { - timeout int64 - sessions map[uint64]*session // map[sessionID]session - timeCtrl *timeController - activeSessions syncfloat64.UpDownCounter - totalSessions syncfloat64.Counter + timeout int64 + sessions map[uint64]*session // map[sessionID]session + timeCtrl *timeController } -func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) { - if metrics == nil { - return nil, fmt.Errorf("metrics module is empty") - } - activeSessions, err := metrics.RegisterUpDownCounter("sessions_active") - if err != nil { - return nil, fmt.Errorf("can't register session.active metric: %s", err) - } - totalSessions, err := 
metrics.RegisterCounter("sessions_total") - if err != nil { - return nil, fmt.Errorf("can't register session.total metric: %s", err) - } - +func New(timeout int64, parts int) (*SessionEnder, error) { return &SessionEnder{ - timeout: timeout, - sessions: make(map[uint64]*session), - timeCtrl: NewTimeController(parts), - activeSessions: activeSessions, - totalSessions: totalSessions, + timeout: timeout, + sessions: make(map[uint64]*session), + timeCtrl: NewTimeController(parts), }, nil } @@ -74,8 +56,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) { lastUserTime: msgTimestamp, // last timestamp from user's machine isEnded: false, } - se.activeSessions.Add(context.Background(), 1) - se.totalSessions.Add(context.Background(), 1) + ender.IncreaseActiveSessions() + ender.IncreaseTotalSessions() return } // Keep the highest user's timestamp for correct session duration value @@ -100,7 +82,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) { sess.isEnded = true if handler(sessID, sess.lastUserTime) { delete(se.sessions, sessID) - se.activeSessions.Add(context.Background(), -1) + ender.DecreaseActiveSessions() + ender.IncreaseClosedSessions() removedSessions++ } else { log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime) diff --git a/backend/internal/sink/assetscache/assets.go b/backend/internal/sink/assetscache/assets.go index 4c63f6897..387ee5c92 100644 --- a/backend/internal/sink/assetscache/assets.go +++ b/backend/internal/sink/assetscache/assets.go @@ -1,20 +1,19 @@ package assetscache import ( - "context" "crypto/md5" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "io" "log" "net/url" - "openreplay/backend/internal/config/sink" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/queue/types" - "openreplay/backend/pkg/url/assets" + metrics "openreplay/backend/pkg/metrics/sink" "strings" "sync" "time" + + "openreplay/backend/internal/config/sink" + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/queue/types" + "openreplay/backend/pkg/url/assets" ) type CachedAsset struct { @@ -23,52 +22,21 @@ type CachedAsset struct { } type AssetsCache struct { - mutex sync.RWMutex - cfg *sink.Config - rewriter *assets.Rewriter - producer types.Producer - cache map[string]*CachedAsset - blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain - totalAssets syncfloat64.Counter - cachedAssets syncfloat64.Counter - skippedAssets syncfloat64.Counter - assetSize syncfloat64.Histogram - assetDuration syncfloat64.Histogram + mutex sync.RWMutex + cfg *sink.Config + rewriter *assets.Rewriter + producer types.Producer + cache map[string]*CachedAsset + blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain } -func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache { - // Assets metrics - totalAssets, err := metrics.RegisterCounter("assets_total") - if err != nil { - log.Printf("can't create assets_total metric: %s", err) - } - cachedAssets, err := metrics.RegisterCounter("assets_cached") - if err != nil { - log.Printf("can't create assets_cached metric: %s", err) - } - skippedAssets, err := metrics.RegisterCounter("assets_skipped") - if err != nil { - log.Printf("can't create assets_skipped metric: %s", err) - } - assetSize, err := metrics.RegisterHistogram("asset_size") - if err != nil { - log.Printf("can't create 
asset_size metric: %s", err) - } - assetDuration, err := metrics.RegisterHistogram("asset_duration") - if err != nil { - log.Printf("can't create asset_duration metric: %s", err) - } +func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache { assetsCache := &AssetsCache{ - cfg: cfg, - rewriter: rewriter, - producer: producer, - cache: make(map[string]*CachedAsset, 64), - blackList: make([]string, 0), - totalAssets: totalAssets, - cachedAssets: cachedAssets, - skippedAssets: skippedAssets, - assetSize: assetSize, - assetDuration: assetDuration, + cfg: cfg, + rewriter: rewriter, + producer: producer, + cache: make(map[string]*CachedAsset, 64), + blackList: make([]string, 0), } // Parse black list for cache layer if len(cfg.CacheBlackList) > 0 { @@ -84,7 +52,7 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, m } func (e *AssetsCache) cleaner() { - cleanTick := time.Tick(time.Minute * 30) + cleanTick := time.Tick(time.Minute * 3) for { select { case <-cleanTick: @@ -105,6 +73,7 @@ func (e *AssetsCache) clearCache() { if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration { deleted++ delete(e.cache, id) + metrics.DecreaseCachedAssets() } } log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize) @@ -232,8 +201,7 @@ func parseHost(baseURL string) (string, error) { } func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string { - ctx := context.Background() - e.totalAssets.Add(ctx, 1) + metrics.IncreaseTotalAssets() // Try to find asset in cache h := md5.New() // Cut first part of url (scheme + host) @@ -255,7 +223,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st e.mutex.RUnlock() if ok { if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration { - e.skippedAssets.Add(ctx, 1) + metrics.IncreaseSkippedAssets() return cachedAsset.msg } } @@ -267,8 +235,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st start := time.Now() res := e.getRewrittenCSS(sessionID, baseURL, css) duration := time.Now().Sub(start).Milliseconds() - e.assetSize.Record(ctx, float64(len(res))) - e.assetDuration.Record(ctx, float64(duration)) + metrics.RecordAssetSize(float64(len(res))) + metrics.RecordProcessAssetDuration(float64(duration)) // Save asset to cache if we spent more than threshold if duration > e.cfg.CacheThreshold { e.mutex.Lock() @@ -277,7 +245,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st ts: time.Now(), } e.mutex.Unlock() - e.cachedAssets.Add(ctx, 1) + metrics.IncreaseCachedAssets() } // Return rewritten asset return res diff --git a/backend/internal/storage/storage.go b/backend/internal/storage/storage.go index fbe9e2228..1e2507163 100644 --- a/backend/internal/storage/storage.go +++ b/backend/internal/storage/storage.go @@ -2,20 +2,20 @@ package storage import ( "bytes" - "context" "fmt" - gzip "github.com/klauspost/pgzip" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - config "openreplay/backend/internal/config/storage" - "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/monitoring" - "openreplay/backend/pkg/storage" "os" "strconv" "strings" "sync" "time" + + config "openreplay/backend/internal/config/storage" + "openreplay/backend/pkg/messages" + metrics "openreplay/backend/pkg/metrics/storage" + "openreplay/backend/pkg/storage" + + gzip "github.com/klauspost/pgzip" ) type FileType string @@ -25,6 +25,13 @@ const ( DEV FileType = 
"/devtools.mob" ) +func (t FileType) String() string { + if t == DOM { + return "dom" + } + return "devtools" +} + type Task struct { id string doms *bytes.Buffer @@ -36,92 +43,23 @@ type Storage struct { cfg *config.Config s3 *storage.S3 startBytes []byte - - totalSessions syncfloat64.Counter - sessionDOMSize syncfloat64.Histogram - sessionDEVSize syncfloat64.Histogram - readingDOMTime syncfloat64.Histogram - readingDEVTime syncfloat64.Histogram - sortingDOMTime syncfloat64.Histogram - sortingDEVTime syncfloat64.Histogram - archivingDOMTime syncfloat64.Histogram - archivingDEVTime syncfloat64.Histogram - uploadingDOMTime syncfloat64.Histogram - uploadingDEVTime syncfloat64.Histogram - - tasks chan *Task - ready chan struct{} + tasks chan *Task + ready chan struct{} } -func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) { +func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) { switch { case cfg == nil: return nil, fmt.Errorf("config is empty") case s3 == nil: return nil, fmt.Errorf("s3 storage is empty") } - // Create metrics - totalSessions, err := metrics.RegisterCounter("sessions_total") - if err != nil { - log.Printf("can't create sessions_total metric: %s", err) - } - sessionDOMSize, err := metrics.RegisterHistogram("sessions_size") - if err != nil { - log.Printf("can't create session_size metric: %s", err) - } - sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size") - if err != nil { - log.Printf("can't create sessions_dt_size metric: %s", err) - } - readingDOMTime, err := metrics.RegisterHistogram("reading_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDOMTime, err := metrics.RegisterHistogram("sorting_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - sortingDEVTime, err := metrics.RegisterHistogram("sorting_dt_duration") - if err != nil { - log.Printf("can't create reading_duration metric: %s", err) - } - archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration") - if err != nil { - log.Printf("can't create archiving_duration metric: %s", err) - } - uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } - uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration") - if err != nil { - log.Printf("can't create uploading_duration metric: %s", err) - } newStorage := &Storage{ - cfg: cfg, - s3: s3, - startBytes: make([]byte, cfg.FileSplitSize), - totalSessions: totalSessions, - sessionDOMSize: sessionDOMSize, - sessionDEVSize: sessionDevtoolsSize, - readingDOMTime: readingDOMTime, - readingDEVTime: readingDEVTime, - sortingDOMTime: sortingDOMTime, - sortingDEVTime: sortingDEVTime, - archivingDOMTime: archivingDOMTime, - archivingDEVTime: archivingDEVTime, - uploadingDOMTime: uploadingDOMTime, - uploadingDEVTime: uploadingDEVTime, - tasks: make(chan *Task, 1), - ready: make(chan struct{}), + cfg: cfg, + s3: s3, + startBytes: make([]byte, cfg.FileSplitSize), + tasks: make(chan *Task, 1), + ready: make(chan struct{}), } go newStorage.worker() return newStorage, nil @@ 
-187,11 +125,7 @@ func (s *Storage) openSession(filePath string, tp FileType) ([]byte, error) { if err != nil { return nil, fmt.Errorf("can't sort session, err: %s", err) } - if tp == DOM { - s.sortingDOMTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } else { - s.sortingDEVTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds())) - } + metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return res, nil } @@ -215,26 +149,19 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { if err != nil { return err } - durRead := time.Now().Sub(startRead).Milliseconds() - // Send metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - if tp == DOM { - s.sessionDOMSize.Record(ctx, float64(len(mob))) - s.readingDOMTime.Record(ctx, float64(durRead)) - } else { - s.sessionDEVSize.Record(ctx, float64(len(mob))) - s.readingDEVTime.Record(ctx, float64(durRead)) - } + metrics.RecordSessionSize(float64(len(mob)), tp.String()) + metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String()) + // Encode and compress session if tp == DEV { - startCompress := time.Now() + start := time.Now() task.dev = s.compressSession(mob) - s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) } else { if len(mob) <= s.cfg.FileSplitSize { - startCompress := time.Now() + start := time.Now() task.doms = s.compressSession(mob) - s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds())) + metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) return nil } wg := &sync.WaitGroup{} @@ -253,7 +180,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error { wg.Done() }() wg.Wait() - s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart)) + metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String()) } return nil } @@ -324,11 +251,9 @@ func (s *Storage) uploadSession(task *Task) { wg.Done() }() wg.Wait() - // Record metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome)) - s.uploadingDEVTime.Record(ctx, float64(uploadDev)) - s.totalSessions.Add(ctx, 1) + metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String()) + metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String()) + metrics.IncreaseStorageTotalSessions() } func (s *Storage) worker() { diff --git a/backend/pkg/db/postgres/batches.go b/backend/pkg/db/postgres/batches.go index c1283da10..abdee36f2 100644 --- a/backend/pkg/db/postgres/batches.go +++ b/backend/pkg/db/postgres/batches.go @@ -1,14 +1,13 @@ package postgres import ( - "context" - "github.com/jackc/pgx/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" - "openreplay/backend/pkg/monitoring" "strings" "time" + + "openreplay/backend/pkg/metrics/database" + + "github.com/jackc/pgx/v4" ) type batchItem struct { @@ -78,21 +77,17 @@ func NewBatchesTask(size int) *batchesTask { } type BatchSet struct { - c Pool - batches map[uint64]*SessionBatch - batchQueueLimit int - batchSizeLimit int - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - 
sqlRequestCounter syncfloat64.Counter - updates map[uint64]*sessionUpdates - workerTask chan *batchesTask - done chan struct{} - finished chan struct{} + c Pool + batches map[uint64]*SessionBatch + batchQueueLimit int + batchSizeLimit int + updates map[uint64]*sessionUpdates + workerTask chan *batchesTask + done chan struct{} + finished chan struct{} } -func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *BatchSet { +func NewBatchSet(c Pool, queueLimit, sizeLimit int) *BatchSet { bs := &BatchSet{ c: c, batches: make(map[uint64]*SessionBatch), @@ -103,31 +98,10 @@ func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) finished: make(chan struct{}), updates: make(map[uint64]*sessionUpdates), } - bs.initMetrics(metrics) go bs.worker() return bs } -func (conn *BatchSet) initMetrics(metrics *monitoring.Metrics) { - var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *BatchSet) getBatch(sessionID uint64) *SessionBatch { sessionID = sessionID % 10 if _, ok := conn.batches[sessionID]; !ok { @@ -194,11 +168,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { // Append session update sql request to the end of batch batch.Prepare() // Record batch size in bytes and number of lines - conn.batchSizeBytes.Record(context.Background(), float64(batch.Size())) - conn.batchSizeLines.Record(context.Background(), float64(batch.Len())) + database.RecordBatchSize(float64(batch.Size())) + database.RecordBatchElements(float64(batch.Len())) start := time.Now() - isFailed := false // Send batch to db and execute br := conn.c.SendBatch(batch.batch) @@ -209,15 +182,11 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { failedSql := batch.items[i] query := strings.ReplaceAll(failedSql.query, "\n", " ") log.Println("failed sql req:", query, failedSql.arguments) - isFailed = true } } br.Close() // returns err - dur := time.Now().Sub(start).Milliseconds() - conn.sqlRequestTime.Record(context.Background(), float64(dur), - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) - conn.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "batch"), attribute.Bool("failed", isFailed)) + database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) + database.IncreaseTotalBatches() } } diff --git a/backend/pkg/db/postgres/bulk.go b/backend/pkg/db/postgres/bulk.go index 8c6c42f78..b6a2ddd35 100644 --- a/backend/pkg/db/postgres/bulk.go +++ b/backend/pkg/db/postgres/bulk.go @@ -2,13 +2,9 @@ package postgres import ( "bytes" - "context" "errors" "fmt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "log" - "openreplay/backend/pkg/monitoring" + "openreplay/backend/pkg/metrics/database" "time" ) @@ -25,15 +21,13 @@ type Bulk interface { } type bulkImpl struct { - conn Pool - table string - columns string - template string - setSize int - sizeLimit int - values 
[]interface{} - bulkSize syncfloat64.Histogram - bulkDuration syncfloat64.Histogram + conn Pool + table string + columns string + template string + setSize int + sizeLimit int + values []interface{} } func (b *bulkImpl) Append(args ...interface{}) error { @@ -79,18 +73,15 @@ func (b *bulkImpl) send() error { return fmt.Errorf("send bulk err: %s", err) } // Save bulk metrics - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200) - b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table)) - b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table)) + database.RecordBulkElements(float64(size), "pg", b.table) + database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table) return nil } -func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { +func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { switch { case conn == nil: return nil, errors.New("db conn is empty") - case metrics == nil: - return nil, errors.New("metrics is empty") case table == "": return nil, errors.New("table is empty") case columns == "": @@ -102,23 +93,13 @@ func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template st case sizeLimit <= 0: return nil, errors.New("size limit is wrong") } - messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } - bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration") - if err != nil { - log.Printf("can't create messages_size metric: %s", err) - } return &bulkImpl{ - conn: conn, - table: table, - columns: columns, - template: template, - setSize: setSize, - sizeLimit: sizeLimit, - values: make([]interface{}, 0, setSize*sizeLimit), - bulkSize: messagesInBulk, - bulkDuration: bulkInsertDuration, + conn: conn, + table: table, + columns: columns, + template: template, + setSize: setSize, + sizeLimit: sizeLimit, + values: make([]interface{}, 0, setSize*sizeLimit), }, nil } diff --git a/backend/pkg/db/postgres/bulks.go b/backend/pkg/db/postgres/bulks.go index 5774ba184..f3e9e95c9 100644 --- a/backend/pkg/db/postgres/bulks.go +++ b/backend/pkg/db/postgres/bulks.go @@ -2,7 +2,6 @@ package postgres import ( "log" - "openreplay/backend/pkg/monitoring" "time" ) @@ -30,16 +29,14 @@ type BulkSet struct { webCustomEvents Bulk webClickEvents Bulk webNetworkRequest Bulk - metrics *monitoring.Metrics workerTask chan *bulksTask done chan struct{} finished chan struct{} } -func NewBulkSet(c Pool, metrics *monitoring.Metrics) *BulkSet { +func NewBulkSet(c Pool) *BulkSet { bs := &BulkSet{ c: c, - metrics: metrics, workerTask: make(chan *bulksTask, 1), done: make(chan struct{}), finished: make(chan struct{}), @@ -86,7 +83,7 @@ func (conn *BulkSet) Get(name string) Bulk { func (conn *BulkSet) initBulks() { var err error - conn.autocompletes, err = NewBulk(conn.c, conn.metrics, + conn.autocompletes, err = NewBulk(conn.c, "autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", @@ -94,7 +91,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create autocomplete bulk: %s", err) } - conn.requests, err = NewBulk(conn.c, conn.metrics, + conn.requests, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, duration, success)", "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)", @@ -102,7 +99,7 @@ func 
(conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create requests bulk: %s", err) } - conn.customEvents, err = NewBulk(conn.c, conn.metrics, + conn.customEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, timestamp, seq_index, name, payload)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d)", @@ -110,7 +107,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create customEvents bulk: %s", err) } - conn.webPageEvents, err = NewBulk(conn.c, conn.metrics, + conn.webPageEvents, err = NewBulk(conn.c, "events.pages", "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+ "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+ @@ -122,7 +119,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webPageEvents bulk: %s", err) } - conn.webInputEvents, err = NewBulk(conn.c, conn.metrics, + conn.webInputEvents, err = NewBulk(conn.c, "events.inputs", "(session_id, message_id, timestamp, value, label)", "($%d, $%d, $%d, LEFT($%d, 2000), NULLIF(LEFT($%d, 2000),''))", @@ -130,7 +127,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webInputEvents bulk: %s", err) } - conn.webGraphQL, err = NewBulk(conn.c, conn.metrics, + conn.webGraphQL, err = NewBulk(conn.c, "events.graphql", "(session_id, timestamp, message_id, name, request_body, response_body)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -138,7 +135,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webGraphQL bulk: %s", err) } - conn.webErrors, err = NewBulk(conn.c, conn.metrics, + conn.webErrors, err = NewBulk(conn.c, "errors", "(error_id, project_id, source, name, message, payload)", "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)", @@ -146,7 +143,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrors bulk: %s", err) } - conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics, + conn.webErrorEvents, err = NewBulk(conn.c, "events.errors", "(session_id, message_id, timestamp, error_id)", "($%d, $%d, $%d, $%d)", @@ -154,7 +151,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorEvents bulk: %s", err) } - conn.webErrorTags, err = NewBulk(conn.c, conn.metrics, + conn.webErrorTags, err = NewBulk(conn.c, "public.errors_tags", "(session_id, message_id, error_id, key, value)", "($%d, $%d, $%d, $%d, $%d)", @@ -162,7 +159,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webErrorTags bulk: %s", err) } - conn.webIssues, err = NewBulk(conn.c, conn.metrics, + conn.webIssues, err = NewBulk(conn.c, "issues", "(project_id, issue_id, type, context_string)", "($%d, $%d, $%d, $%d)", @@ -170,7 +167,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssues bulk: %s", err) } - conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics, + conn.webIssueEvents, err = NewBulk(conn.c, "events_common.issues", "(session_id, issue_id, timestamp, seq_index, payload)", "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))", @@ -178,7 +175,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webIssueEvents bulk: %s", err) } - conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics, + conn.webCustomEvents, err = NewBulk(conn.c, "events_common.customs", "(session_id, seq_index, timestamp, name, payload, level)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", @@ -186,7 +183,7 @@ func (conn *BulkSet)
initBulks() { if err != nil { log.Fatalf("can't create webCustomEvents bulk: %s", err) } - conn.webClickEvents, err = NewBulk(conn.c, conn.metrics, + conn.webClickEvents, err = NewBulk(conn.c, "events.clicks", "(session_id, message_id, timestamp, label, selector, url, path)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000))", @@ -194,7 +191,7 @@ func (conn *BulkSet) initBulks() { if err != nil { log.Fatalf("can't create webClickEvents bulk: %s", err) } - conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics, + conn.webNetworkRequest, err = NewBulk(conn.c, "events_common.requests", "(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)", "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)", diff --git a/backend/pkg/db/postgres/connector.go b/backend/pkg/db/postgres/connector.go index 2e8f3d425..6904dc135 100644 --- a/backend/pkg/db/postgres/connector.go +++ b/backend/pkg/db/postgres/connector.go @@ -2,11 +2,10 @@ package postgres import ( "context" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "log" + + "github.com/jackc/pgx/v4/pgxpool" "openreplay/backend/pkg/db/types" - "openreplay/backend/pkg/monitoring" ) type CH interface { @@ -15,36 +14,28 @@ type CH interface { // Conn contains batches, bulks and cache for all sessions type Conn struct { - c Pool - batches *BatchSet - bulks *BulkSet - batchSizeBytes syncfloat64.Histogram - batchSizeLines syncfloat64.Histogram - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter - chConn CH + c Pool + batches *BatchSet + bulks *BulkSet + chConn CH } func (conn *Conn) SetClickHouse(ch CH) { conn.chConn = ch } -func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn { - if metrics == nil { - log.Fatalf("metrics is nil") - } +func NewConn(url string, queueLimit, sizeLimit int) *Conn { c, err := pgxpool.Connect(context.Background(), url) if err != nil { log.Fatalf("pgxpool.Connect err: %s", err) } conn := &Conn{} - conn.initMetrics(metrics) - conn.c, err = NewPool(c, conn.sqlRequestTime, conn.sqlRequestCounter) + conn.c, err = NewPool(c) if err != nil { log.Fatalf("can't create new pool wrapper: %s", err) } - conn.bulks = NewBulkSet(conn.c, metrics) - conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit, metrics) + conn.bulks = NewBulkSet(conn.c) + conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit) return conn } @@ -55,26 +46,6 @@ func (conn *Conn) Close() error { return nil } -func (conn *Conn) initMetrics(metrics *monitoring.Metrics) { - var err error - conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes") - if err != nil { - log.Printf("can't create batchSizeBytes metric: %s", err) - } - conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines") - if err != nil { - log.Printf("can't create batchSizeLines metric: %s", err) - } - conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time") - if err != nil { - log.Printf("can't create sqlRequestTime metric: %s", err) - } - conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number") - if err != nil { - log.Printf("can't create sqlRequestNumber metric: %s", err) - } -} - func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) { if len(value) == 0 { return diff --git 
a/backend/pkg/db/postgres/pool.go b/backend/pkg/db/postgres/pool.go index 5f9cbaa29..5214be8d0 100644 --- a/backend/pkg/db/postgres/pool.go +++ b/backend/pkg/db/postgres/pool.go @@ -3,12 +3,12 @@ package postgres import ( "context" "errors" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" "strings" "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "openreplay/backend/pkg/metrics/database" ) // Pool is a pgx.Pool wrapper with metrics integration @@ -22,19 +22,15 @@ type Pool interface { } type poolImpl struct { - conn *pgxpool.Pool - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter + conn *pgxpool.Pool } func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) { start := time.Now() res, err := p.conn.Query(getTimeoutContext(), sql, args...) method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res, err } @@ -42,10 +38,8 @@ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row { start := time.Now() res := p.conn.QueryRow(getTimeoutContext(), sql, args...) method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return res } @@ -53,45 +47,37 @@ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error { start := time.Now() _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...) 
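// Every Pool method in this file repeats the same instrumentation shape around
// the underlying pgx call, using the database metrics package introduced by
// this patch: derive (method, table) labels from the SQL text, record the
// elapsed time, and bump the per-method request counter. A sketch of the
// shared shape — doPgxCall stands in for Query/QueryRow/Exec:
//
//	start := time.Now()
//	res, err := doPgxCall()
//	method, table := methodName(sql)
//	database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
//	database.IncreaseTotalRequests(method, table)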
method, table := methodName(sql) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults { start := time.Now() res := p.conn.SendBatch(getTimeoutContext(), b) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "sendBatch")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "sendBatch")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "") + database.IncreaseTotalRequests("sendBatch", "") return res } func (p *poolImpl) Begin() (*_Tx, error) { start := time.Now() tx, err := p.conn.Begin(context.Background()) - p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "begin")) - p.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "begin")) - return &_Tx{tx, p.sqlRequestTime, p.sqlRequestCounter}, err + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "") + database.IncreaseTotalRequests("begin", "") + return &_Tx{tx}, err } func (p *poolImpl) Close() { p.conn.Close() } -func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlRequestCounter syncfloat64.Counter) (Pool, error) { +func NewPool(conn *pgxpool.Pool) (Pool, error) { if conn == nil { return nil, errors.New("conn is empty") } return &poolImpl{ - conn: conn, - sqlRequestTime: sqlRequestTime, - sqlRequestCounter: sqlRequestCounter, + conn: conn, }, nil } @@ -99,38 +85,30 @@ func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlReques type _Tx struct { pgx.Tx - sqlRequestTime syncfloat64.Histogram - sqlRequestCounter syncfloat64.Counter } func (tx *_Tx) exec(sql string, args ...interface{}) error { start := time.Now() _, err := tx.Exec(context.Background(), sql, args...) 
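// methodName (see the fix at the end of this file) parses these labels out of
// the SQL text itself. A worked example for an INSERT, assuming whitespace
// tokenization of the statement: for
//
//	INSERT INTO events.inputs(session_id, message_id, ...) VALUES (...)
//
// the third token is "events.inputs(session_id,", so the old parts[2] value
// dragged the column list into the "table" label; splitting the token on "("
// and keeping the first piece yields just "events.inputs".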
method, table := methodName(sql) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", method), attribute.String("table", table)) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", method), attribute.String("table", table)) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) + database.IncreaseTotalRequests(method, table) return err } func (tx *_Tx) rollback() error { start := time.Now() err := tx.Rollback(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "rollback")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "rollback")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "") + database.IncreaseTotalRequests("rollback", "") return err } func (tx *_Tx) commit() error { start := time.Now() err := tx.Commit(context.Background()) - tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()), - attribute.String("method", "commit")) - tx.sqlRequestCounter.Add(context.Background(), 1, - attribute.String("method", "commit")) + database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "") + database.IncreaseTotalRequests("commit", "") return err } @@ -169,7 +147,8 @@ func methodName(sql string) (string, string) { case "update": table = strings.TrimSpace(parts[1]) case "insert": - table = strings.TrimSpace(parts[2]) + tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(") + table = tableNameParts[0] } return cmd, table } diff --git a/backend/pkg/messages/iterator-sink.go b/backend/pkg/messages/iterator-sink.go index a5897c3b7..be12b63eb 100644 --- a/backend/pkg/messages/iterator-sink.go +++ b/backend/pkg/messages/iterator-sink.go @@ -3,6 +3,7 @@ package messages import ( "fmt" "log" + "openreplay/backend/pkg/metrics/sink" ) type sinkMessageIteratorImpl struct { @@ -53,6 +54,8 @@ func (i *sinkMessageIteratorImpl) sendBatchEnd() { } func (i *sinkMessageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { + sink.RecordBatchSize(float64(len(batchData))) + sink.IncreaseTotalBatches() // Create new message reader reader := NewMessageReader(batchData) diff --git a/backend/pkg/messages/iterator.go b/backend/pkg/messages/iterator.go index a6717257e..f7b014d30 100644 --- a/backend/pkg/messages/iterator.go +++ b/backend/pkg/messages/iterator.go @@ -74,12 +74,13 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { i.messageInfo.Index++ msg := reader.Message() + msgType := msg.TypeID() // Preprocess "system" messages if _, ok := i.preFilter[msg.TypeID()]; ok { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } msg = transformDeprecated(msg) @@ -99,7 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) { if i.autoDecode { msg = msg.Decode() if msg == nil { - log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info()) + log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info()) return } } diff --git a/backend/pkg/metrics/assets/metrics.go b/backend/pkg/metrics/assets/metrics.go new file mode 100644 index 000000000..44af0dfa9 --- /dev/null +++ 
b/backend/pkg/metrics/assets/metrics.go @@ -0,0 +1,72 @@ +package assets + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" + "strconv" +) + +var assetsProcessedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "assets", + Name: "processed_total", + Help: "A counter displaying the total count of processed assets.", + }, +) + +func IncreaseProcessesSessions() { + assetsProcessedSessions.Inc() +} + +var assetsSavedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "assets", + Name: "saved_total", + Help: "A counter displaying the total number of cached assets.", + }, +) + +func IncreaseSavedSessions() { + assetsSavedSessions.Inc() +} + +var assetsDownloadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "assets", + Name: "download_duration_seconds", + Help: "A histogram displaying the duration of downloading for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"response_code"}, +) + +func RecordDownloadDuration(durMillis float64, code int) { + assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0) +} + +var assetsUploadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "assets", + Name: "upload_s3_duration_seconds", + Help: "A histogram displaying the duration of uploading to s3 for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"failed"}, +) + +func RecordUploadDuration(durMillis float64, isFailed bool) { + failed := "false" + if isFailed { + failed = "true" + } + assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + assetsProcessedSessions, + assetsSavedSessions, + assetsDownloadDuration, + assetsUploadDuration, + } +} diff --git a/backend/pkg/metrics/common/metrics.go b/backend/pkg/metrics/common/metrics.go new file mode 100644 index 000000000..85b66c713 --- /dev/null +++ b/backend/pkg/metrics/common/metrics.go @@ -0,0 +1,11 @@ +package common + +// DefaultDurationBuckets is a set of buckets from 5 milliseconds to 1000 seconds (16.6667 minutes) +var DefaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000} + +// DefaultSizeBuckets is a set of buckets from 1 byte to 1_000_000_000 bytes (~1 Gb) +var DefaultSizeBuckets = []float64{1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100_000, 250_000, + 500_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000} + +// DefaultBuckets is a set of buckets from 1 to 1_000_000 elements +var DefaultBuckets = []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10_000, 50_000, 100_000, 1_000_000} diff --git a/backend/pkg/metrics/database/metrics.go b/backend/pkg/metrics/database/metrics.go new file mode 100644 index 000000000..a9f3990cd --- /dev/null +++ b/backend/pkg/metrics/database/metrics.go @@ -0,0 +1,127 @@ +package database + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var dbBatchSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_size_bytes", + Help: "A histogram displaying the batch size in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordBatchSize(size float64) { + dbBatchSize.Observe(size) +} + +var dbBatchElements = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: 
"db", + Name: "batch_size_elements", + Help: "A histogram displaying the number of SQL commands in each batch.", + Buckets: common.DefaultBuckets, + }, +) + +func RecordBatchElements(number float64) { + dbBatchElements.Observe(number) +} + +var dbBatchInsertDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "batch_insert_duration_seconds", + Help: "A histogram displaying the duration of batch inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordBatchInsertDuration(durMillis float64) { + dbBatchInsertDuration.Observe(durMillis / 1000.0) +} + +var dbBulkSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_bytes", + Help: "A histogram displaying the bulk size in bytes.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkSize(size float64, db, table string) { + dbBulkSize.WithLabelValues(db, table).Observe(size) +} + +var dbBulkElements = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_size_elements", + Help: "A histogram displaying the size of data set in each bulk.", + Buckets: common.DefaultBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkElements(size float64, db, table string) { + dbBulkElements.WithLabelValues(db, table).Observe(size) +} + +var dbBulkInsertDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "bulk_insert_duration_seconds", + Help: "A histogram displaying the duration of bulk inserts in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"db", "table"}, +) + +func RecordBulkInsertDuration(durMillis float64, db, table string) { + dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0) +} + +var dbRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "db", + Name: "request_duration_seconds", + Help: "A histogram displaying the duration of each sql request in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"method", "table"}, +) + +func RecordRequestDuration(durMillis float64, method, table string) { + dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0) +} + +var dbTotalRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "db", + Name: "requests_total", + Help: "A counter showing the total number of all SQL requests.", + }, + []string{"method", "table"}, +) + +func IncreaseTotalRequests(method, table string) { + dbTotalRequests.WithLabelValues(method, table).Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + dbBatchSize, + dbBatchElements, + dbBatchInsertDuration, + dbBulkSize, + dbBulkElements, + dbBulkInsertDuration, + dbRequestDuration, + dbTotalRequests, + } +} diff --git a/backend/pkg/metrics/ender/metrics.go b/backend/pkg/metrics/ender/metrics.go new file mode 100644 index 000000000..5e3308554 --- /dev/null +++ b/backend/pkg/metrics/ender/metrics.go @@ -0,0 +1,51 @@ +package ender + +import "github.com/prometheus/client_golang/prometheus" + +var enderActiveSessions = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "ender", + Name: "sessions_active", + Help: "A gauge displaying the number of active (live) sessions.", + }, +) + +func IncreaseActiveSessions() { + enderActiveSessions.Inc() +} + +func DecreaseActiveSessions() { + enderActiveSessions.Dec() +} + +var enderClosedSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + 
Namespace: "ender", + Name: "sessions_closed", + Help: "A counter displaying the number of closed sessions (sent SessionEnd).", + }, +) + +func IncreaseClosedSessions() { + enderClosedSessions.Inc() +} + +var enderTotalSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "ender", + Name: "sessions_total", + Help: "A counter displaying the number of all processed sessions.", + }, +) + +func IncreaseTotalSessions() { + enderTotalSessions.Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + enderActiveSessions, + enderClosedSessions, + enderTotalSessions, + } +} diff --git a/backend/pkg/metrics/http/metrics.go b/backend/pkg/metrics/http/metrics.go new file mode 100644 index 000000000..7a835d7f6 --- /dev/null +++ b/backend/pkg/metrics/http/metrics.go @@ -0,0 +1,55 @@ +package http + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" + "strconv" +) + +var httpRequestSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "http", + Name: "request_size_bytes", + Help: "A histogram displaying the size of each HTTP request in bytes.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"url", "response_code"}, +) + +func RecordRequestSize(size float64, url string, code int) { + httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size) +} + +var httpRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "http", + Name: "request_duration_seconds", + Help: "A histogram displaying the duration of each HTTP request in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"url", "response_code"}, +) + +func RecordRequestDuration(durMillis float64, url string, code int) { + httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0) +} + +var httpTotalRequests = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "http", + Name: "requests_total", + Help: "A counter displaying the number all HTTP requests.", + }, +) + +func IncreaseTotalRequests() { + httpTotalRequests.Inc() +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + httpRequestSize, + httpRequestDuration, + httpTotalRequests, + } +} diff --git a/backend/pkg/metrics/server.go b/backend/pkg/metrics/server.go new file mode 100644 index 000000000..fb3be5afc --- /dev/null +++ b/backend/pkg/metrics/server.go @@ -0,0 +1,40 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "log" + "net/http" +) + +type MetricServer struct { + registry *prometheus.Registry +} + +func New() *MetricServer { + registry := prometheus.NewRegistry() + // Add go runtime metrics and process collectors. + registry.MustRegister( + collectors.NewGoCollector(), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + ) + // Expose /metrics HTTP endpoint using the created custom registry. + http.Handle( + "/metrics", promhttp.HandlerFor( + registry, + promhttp.HandlerOpts{ + EnableOpenMetrics: true, + }), + ) + go func() { + log.Println(http.ListenAndServe(":8888", nil)) + }() + return &MetricServer{ + registry: registry, + } +} + +func (s *MetricServer) Register(cs []prometheus.Collector) { + s.registry.MustRegister(cs...) 
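+ // MustRegister panics on duplicate registration, so each service should
+ // register a package's List() exactly once at startup. Minimal sketch:
+ // m := metrics.New()
+ // m.Register(database.List())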
+} diff --git a/backend/pkg/metrics/sink/metrics.go b/backend/pkg/metrics/sink/metrics.go new file mode 100644 index 000000000..52cb73ba1 --- /dev/null +++ b/backend/pkg/metrics/sink/metrics.go @@ -0,0 +1,185 @@ +package sink + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var sinkMessageSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "message_size_bytes", + Help: "A histogram displaying the size of each message in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordMessageSize(size float64) { + sinkMessageSize.Observe(size) +} + +var sinkWrittenMessages = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "messages_written", + Help: "A counter displaying the total number of all written messages.", + }, +) + +func IncreaseWrittenMessages() { + sinkWrittenMessages.Inc() +} + +var sinkTotalMessages = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "messages_total", + Help: "A counter displaying the total number of all processed messages.", + }, +) + +func IncreaseTotalMessages() { + sinkTotalMessages.Inc() +} + +var sinkBatchSize = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "batch_size_bytes", + Help: "A histogram displaying the size of each batch in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordBatchSize(size float64) { + sinkBatchSize.Observe(size) +} + +var sinkTotalBatches = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "batches_total", + Help: "A counter displaying the total number of all written batches.", + }, +) + +func IncreaseTotalBatches() { + sinkTotalBatches.Inc() +} + +var sinkWrittenBytes = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "written_bytes", + Help: "A histogram displaying the size of buffer in bytes written to session file.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"file_type"}, +) + +func RecordWrittenBytes(size float64, fileType string) { + if size == 0 { + return + } + sinkWrittenBytes.WithLabelValues(fileType).Observe(size) + IncreaseTotalWrittenBytes(size, fileType) +} + +var sinkTotalWrittenBytes = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "written_bytes_total", + Help: "A counter displaying the total number of bytes written to all session files.", + }, + []string{"file_type"}, +) + +func IncreaseTotalWrittenBytes(size float64, fileType string) { + if size == 0 { + return + } + sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size) +} + +var sinkCachedAssets = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "sink", + Name: "assets_cached", + Help: "A gauge displaying the current number of cached assets.", + }, +) + +func IncreaseCachedAssets() { + sinkCachedAssets.Inc() +} + +func DecreaseCachedAssets() { + sinkCachedAssets.Dec() +} + +var sinkSkippedAssets = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "assets_skipped", + Help: "A counter displaying the total number of all skipped assets.", + }, +) + +func IncreaseSkippedAssets() { + sinkSkippedAssets.Inc() +} + +var sinkTotalAssets = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "sink", + Name: "assets_total", + Help: "A counter displaying the total number of all processed assets.", + }, +) + +func IncreaseTotalAssets() { + sinkTotalAssets.Inc() +} + +var sinkAssetSize = 
prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_size_bytes", + Help: "A histogram displaying the size of each asset in bytes.", + Buckets: common.DefaultSizeBuckets, + }, +) + +func RecordAssetSize(size float64) { + sinkAssetSize.Observe(size) +} + +var sinkProcessAssetDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "sink", + Name: "asset_process_duration_seconds", + Help: "A histogram displaying the duration of processing for each asset in seconds.", + Buckets: common.DefaultDurationBuckets, + }, +) + +func RecordProcessAssetDuration(durMillis float64) { + sinkProcessAssetDuration.Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + sinkMessageSize, + sinkWrittenMessages, + sinkTotalMessages, + sinkBatchSize, + sinkTotalBatches, + sinkWrittenBytes, + sinkTotalWrittenBytes, + sinkCachedAssets, + sinkSkippedAssets, + sinkTotalAssets, + sinkAssetSize, + sinkProcessAssetDuration, + } +} diff --git a/backend/pkg/metrics/storage/metrics.go b/backend/pkg/metrics/storage/metrics.go new file mode 100644 index 000000000..26459c90d --- /dev/null +++ b/backend/pkg/metrics/storage/metrics.go @@ -0,0 +1,114 @@ +package storage + +import ( + "github.com/prometheus/client_golang/prometheus" + "openreplay/backend/pkg/metrics/common" +) + +var storageSessionSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "session_size_bytes", + Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.", + Buckets: common.DefaultSizeBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSize(fileSize float64, fileType string) { + storageSessionSize.WithLabelValues(fileType).Observe(fileSize) +} + +var storageTotalSessions = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "storage", + Name: "sessions_total", + Help: "A counter displaying the total number of all processed sessions.", + }, +) + +func IncreaseStorageTotalSessions() { + storageTotalSessions.Inc() +} + +var storageSessionReadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "read_duration_seconds", + Help: "A histogram displaying the duration of reading for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionReadDuration(durMillis float64, fileType string) { + storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionSortDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "sort_duration_seconds", + Help: "A histogram displaying the duration of sorting for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionSortDuration(durMillis float64, fileType string) { + storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionEncodeDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "encode_duration_seconds", + Help: "A histogram displaying the duration of encoding for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionEncodeDuration(durMillis float64, fileType string) { + storageSessionEncodeDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionCompressDuration = 
prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "compress_duration_seconds", + Help: "A histogram displaying the duration of compressing for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionCompressDuration(durMillis float64, fileType string) { + storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +var storageSessionUploadDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "storage", + Name: "upload_duration_seconds", + Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.", + Buckets: common.DefaultDurationBuckets, + }, + []string{"file_type"}, +) + +func RecordSessionUploadDuration(durMillis float64, fileType string) { + storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0) +} + +func List() []prometheus.Collector { + return []prometheus.Collector{ + storageSessionSize, + storageTotalSessions, + storageSessionReadDuration, + storageSessionSortDuration, + storageSessionEncodeDuration, + storageSessionCompressDuration, + storageSessionUploadDuration, + } +} diff --git a/ee/backend/pkg/db/clickhouse/bulk.go b/ee/backend/pkg/db/clickhouse/bulk.go index 706b66f68..6eb8d98fd 100644 --- a/ee/backend/pkg/db/clickhouse/bulk.go +++ b/ee/backend/pkg/db/clickhouse/bulk.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "log" + "openreplay/backend/pkg/metrics/database" + "time" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) @@ -16,19 +18,23 @@ type Bulk interface { type bulkImpl struct { conn driver.Conn + table string query string values [][]interface{} } -func NewBulk(conn driver.Conn, query string) (Bulk, error) { +func NewBulk(conn driver.Conn, table, query string) (Bulk, error) { switch { case conn == nil: return nil, errors.New("clickhouse connection is empty") + case table == "": + return nil, errors.New("table is empty") case query == "": return nil, errors.New("query is empty") } return &bulkImpl{ conn: conn, + table: table, query: query, values: make([][]interface{}, 0), }, nil @@ -40,6 +46,7 @@ func (b *bulkImpl) Append(args ...interface{}) error { } func (b *bulkImpl) Send() error { + start := time.Now() batch, err := b.conn.PrepareBatch(context.Background(), b.query) if err != nil { return fmt.Errorf("can't create new batch: %s", err) @@ -50,6 +57,11 @@ func (b *bulkImpl) Send() error { log.Printf("failed query: %s", b.query) } } + err = batch.Send() + // Save bulk metrics + database.RecordBulkElements(float64(len(b.values)), "ch", b.table) + database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table) + // Prepare values slice for a new data b.values = make([][]interface{}, 0) - return batch.Send() + return err } diff --git a/ee/backend/pkg/db/clickhouse/connector.go b/ee/backend/pkg/db/clickhouse/connector.go index 157d384b9..b872adcc2 100644 --- a/ee/backend/pkg/db/clickhouse/connector.go +++ b/ee/backend/pkg/db/clickhouse/connector.go @@ -3,18 +3,16 @@ package clickhouse import ( "errors" "fmt" + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "log" "openreplay/backend/pkg/db/types" "openreplay/backend/pkg/hashid" "openreplay/backend/pkg/messages" "openreplay/backend/pkg/url" - "os" "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2" - "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - "openreplay/backend/pkg/license" ) @@ -52,28 +50,14 @@ type 
connectorImpl struct { finished chan struct{} } -// Check env variables. If not present, return default value. -func getEnv(key, fallback string) string { - if value, ok := os.LookupEnv(key); ok { - return value - } - return fallback -} - func NewConnector(url string) Connector { license.CheckLicense() - // Check username, password, database - userName := getEnv("CH_USERNAME", "default") - password := getEnv("CH_PASSWORD", "") - database := getEnv("CH_DATABASE", "default") url = strings.TrimPrefix(url, "tcp://") - url = strings.TrimSuffix(url, "/"+database) + url = strings.TrimSuffix(url, "/default") conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{url}, Auth: clickhouse.Auth{ - Database: database, - Username: userName, - Password: password, + Database: "default", }, MaxOpenConns: 20, MaxIdleConns: 15, @@ -99,7 +83,7 @@ func NewConnector(url string) Connector { } func (c *connectorImpl) newBatch(name, query string) error { - batch, err := NewBulk(c.conn, query) + batch, err := NewBulk(c.conn, name, query) if err != nil { return fmt.Errorf("can't create new batch: %s", err) } From 5e846245e6262b01d1b4cdb71ac4f08898783856 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:45:00 +0100 Subject: [PATCH 072/151] fix(backend): removed wrong line from batchSet --- backend/pkg/db/postgres/batches.go | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/pkg/db/postgres/batches.go b/backend/pkg/db/postgres/batches.go index abdee36f2..8b9f2484d 100644 --- a/backend/pkg/db/postgres/batches.go +++ b/backend/pkg/db/postgres/batches.go @@ -186,7 +186,6 @@ func (conn *BatchSet) sendBatches(t *batchesTask) { } br.Close() // returns err database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) - database.IncreaseTotalBatches() } } From 596ddb284da241d326fa5e7176f0b27ea5665573 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:56:00 +0100 Subject: [PATCH 073/151] fix(backend): upgrade /x/net library to avoid vulnerabilities --- backend/go.mod | 8 ++++---- backend/go.sum | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index 0615fb0cb..e11b839fa 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -20,14 +20,14 @@ require ( github.com/klauspost/pgzip v1.2.5 github.com/oschwald/maxminddb-golang v1.7.0 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.12.1 github.com/sethvargo/go-envconfig v0.7.0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - go.opentelemetry.io/otel v1.7.0 go.opentelemetry.io/otel/exporters/prometheus v0.30.0 go.opentelemetry.io/otel/metric v0.30.0 go.opentelemetry.io/otel/sdk/metric v0.30.0 - golang.org/x/net v0.0.0-20220906165146-f3363e06e74c + golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 google.golang.org/api v0.81.0 ) @@ -55,19 +55,19 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/paulmach/orb v0.7.1 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/stretchr/testify v1.8.0 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect go.opentelemetry.io/otel/sdk v1.7.0 // 
indirect go.opentelemetry.io/otel/trace v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect + golang.org/x/sys v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/backend/go.sum b/backend/go.sum index 5aa3ae3de..7b33d881d 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -601,8 +601,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo= -golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 h1:KrLJ+iz8J6j6VVr/OCfULAcK+xozUmWE43fKpMR4MlI= +golang.org/x/net v0.1.1-0.20221104162952-702349b0e862/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -715,8 +715,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From dc9f3f79eeba1d125511aa0606beda8ec90ae50a Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Mon, 20 Feb 2023 16:59:57 +0100 Subject: [PATCH 074/151] feat(backend): clean up go modules --- backend/go.mod | 6 -- backend/go.sum | 13 --- backend/pkg/monitoring/metrics.go | 138 ------------------------------ 3 files changed, 157 deletions(-) delete mode 100644 backend/pkg/monitoring/metrics.go diff --git a/backend/go.mod b/backend/go.mod index e11b839fa..161513ed8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -24,9 +24,6 @@ require ( github.com/sethvargo/go-envconfig v0.7.0 github.com/tomasen/realip 
v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - go.opentelemetry.io/otel/exporters/prometheus v0.30.0 - go.opentelemetry.io/otel/metric v0.30.0 - go.opentelemetry.io/otel/sdk/metric v0.30.0 golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 google.golang.org/api v0.81.0 ) @@ -38,8 +35,6 @@ require ( cloud.google.com/go/storage v1.14.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect @@ -62,7 +57,6 @@ require ( github.com/stretchr/testify v1.8.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.7.0 // indirect - go.opentelemetry.io/otel/sdk v1.7.0 // indirect go.opentelemetry.io/otel/trace v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect diff --git a/backend/go.sum b/backend/go.sum index 7b33d881d..de6d507d3 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -80,8 +80,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.44.98 h1:fX+NxebSdO/9T6DTNOLhpC+Vv6RNkKRfsMg0a7o/yBo= github.com/aws/aws-sdk-go v1.44.98/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -156,9 +154,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -489,14 +485,6 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0= -go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs= -go.opentelemetry.io/otel/metric v0.30.0 
h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= -go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= -go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= -go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME= -go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8= go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -690,7 +678,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/backend/pkg/monitoring/metrics.go b/backend/pkg/monitoring/metrics.go deleted file mode 100644 index 803fba127..000000000 --- a/backend/pkg/monitoring/metrics.go +++ /dev/null @@ -1,138 +0,0 @@ -package monitoring - -import ( - "fmt" - "log" - "net/http" - - "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -// Metrics stores all collected metrics -type Metrics struct { - meter metric.Meter - counters map[string]syncfloat64.Counter - upDownCounters map[string]syncfloat64.UpDownCounter - histograms map[string]syncfloat64.Histogram -} - -func New(name string) *Metrics { - m := &Metrics{ - counters: make(map[string]syncfloat64.Counter), - upDownCounters: make(map[string]syncfloat64.UpDownCounter), - histograms: make(map[string]syncfloat64.Histogram), - } - m.initPrometheusDataExporter() - m.initMetrics(name) - return m -} - -// initPrometheusDataExporter allows to use collected metrics in prometheus -func (m *Metrics) initPrometheusDataExporter() { - config := prometheus.Config{ - DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000}, - } - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution( - histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), - ), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - ) - exporter, err := prometheus.New(config, c) - if err != nil { - log.Panicf("failed to initialize prometheus 
exporter %v", err) - } - - global.SetMeterProvider(exporter.MeterProvider()) - - http.HandleFunc("/metrics", exporter.ServeHTTP) - go func() { - _ = http.ListenAndServe(":8888", nil) - }() - - fmt.Println("Prometheus server running on :8888") -} - -func (m *Metrics) initMetrics(name string) { - m.meter = global.Meter(name) -} - -/* -Counter is a synchronous instrument that measures additive non-decreasing values, for example, the number of: -- processed requests -- received bytes -- disk reads -*/ - -func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) { - if counter, ok := m.counters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().Counter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize counter: %v", err) - } - m.counters[name] = counter - return counter, nil -} - -func (m *Metrics) GetCounter(name string) syncfloat64.Counter { - return m.counters[name] -} - -/* -UpDownCounter is a synchronous instrument which measures additive values that increase or decrease with time, -for example, the number of: -- active requests -- open connections -- memory in use (megabytes) -*/ - -func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) { - if counter, ok := m.upDownCounters[name]; ok { - return counter, nil - } - counter, err := m.meter.SyncFloat64().UpDownCounter(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize upDownCounter: %v", err) - } - m.upDownCounters[name] = counter - return counter, nil -} - -func (m *Metrics) GetUpDownCounter(name string) syncfloat64.UpDownCounter { - return m.upDownCounters[name] -} - -/* -Histogram is a synchronous instrument that produces a histogram from recorded values, for example: -- request latency -- request size -*/ - -func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) { - if hist, ok := m.histograms[name]; ok { - return hist, nil - } - hist, err := m.meter.SyncFloat64().Histogram(name) - if err != nil { - return nil, fmt.Errorf("failed to initialize histogram: %v", err) - } - m.histograms[name] = hist - return hist, nil -} - -func (m *Metrics) GetHistogram(name string) syncfloat64.Histogram { - return m.histograms[name] -} From b93c2ed269172fb72adc886e973037ec3d82de12 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Mon, 20 Feb 2023 17:24:09 +0100 Subject: [PATCH 075/151] fix(ui): change clickmap fetch filter --- frontend/app/services/MetricService.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/services/MetricService.ts b/frontend/app/services/MetricService.ts index d8c6e099a..5b97ec4ec 100644 --- a/frontend/app/services/MetricService.ts +++ b/frontend/app/services/MetricService.ts @@ -75,7 +75,7 @@ export default class MetricService { getMetricChartData(metric: Widget, data: any, isWidget: boolean = false): Promise { if ( metric.metricType === CLICKMAP - && document.location.pathname.split('/').pop() !== 'metrics' + && document.location.pathname.split('/').pop() === 'metrics' && (document.location.pathname.indexOf('dashboard') !== -1 && document.location.pathname.indexOf('metric') === -1) ) { return Promise.resolve({}) From 3f6156dbf7e6586a16884a532d2ab1f51fa0659f Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:02:14 +0100 Subject: [PATCH 076/151] fix(ui) - search url unhandled filter key --- .../Filters/FilterSource/FilterSource.tsx | 2 +- frontend/app/types/filter/newFilter.js | 1 + frontend/app/utils/search.ts | 17 ++++++++++++----- 3 files 
changed, 14 insertions(+), 6 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index 08c93d8df..7ae8d3a92 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -9,7 +9,7 @@ interface Props { } function FilterSource(props: Props) { const { filter } = props; - const [value, setValue] = useState(filter.source[0] || ''); + const [value, setValue] = useState(filter.source && filter.source[0] ? filter.source[0] : ''); useEffect(() => { setValue(filter.source[0] || ''); diff --git a/frontend/app/types/filter/newFilter.js b/frontend/app/types/filter/newFilter.js index 7a612c193..286f1cc13 100644 --- a/frontend/app/types/filter/newFilter.js +++ b/frontend/app/types/filter/newFilter.js @@ -195,6 +195,7 @@ export default Record({ _filter = filtersMap[type]; } } + return { ..._filter, ...filter, diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index 017a5a7f6..d688ee369 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -58,9 +58,6 @@ const getFiltersFromEntries = (entires: any) => { let filter: any = {}; const filterKey = getFilterKeyTypeByKey(item.key); - if (!filterKey) { - return; - } const tmp = item.value.split('^'); const valueArr = tmp[0].split('|'); const operator = valueArr.shift(); @@ -78,10 +75,20 @@ const getFiltersFromEntries = (entires: any) => { } } + if (!filter) { + return + } + filter.value = valueArr; filter.operator = operator; - filter.source = sourceArr && sourceArr.length > 0 ? sourceArr : null; - filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; + if (filter.icon === "filters/metadata") { + filter.source = filter.type; + filter.type = 'metadata'; + } else { + filter.source = sourceArr && sourceArr.length > 0 ? sourceArr : null; + filter.sourceOperator = !!sourceOperator ? decodeURI(sourceOperator) : null; + } + if (!filter.filters || filter.filters.size === 0) { filters.push(filter); } From 12c81188edbc1e768019dc5efdf6a2e3c0267047 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:26:40 +0100 Subject: [PATCH 077/151] fix(ui) - search url unhandled filter key --- .../app/components/shared/Filters/FilterSource/FilterSource.tsx | 2 +- frontend/app/utils/search.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx index 7ae8d3a92..07ca61ec3 100644 --- a/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx +++ b/frontend/app/components/shared/Filters/FilterSource/FilterSource.tsx @@ -12,7 +12,7 @@ function FilterSource(props: Props) { const [value, setValue] = useState(filter.source && filter.source[0] ? filter.source[0] : ''); useEffect(() => { - setValue(filter.source[0] || ''); + setValue(filter.source && filter.source[0] ? 
filter.source[0] : ''); }, [filter]); const write = ({ target: { value, name } }: any) => setValue(value); diff --git a/frontend/app/utils/search.ts b/frontend/app/utils/search.ts index d688ee369..4b32f8d13 100644 --- a/frontend/app/utils/search.ts +++ b/frontend/app/utils/search.ts @@ -13,7 +13,7 @@ export const createUrlQuery = (filter: any) => { let str = `${f.operator}|${f.value.join('|')}`; if (f.hasSource) { - str = `${str}^${f.sourceOperator}|${f.source.join('|')}`; + str = `${str}^${f.sourceOperator ? f.sourceOperator : ''}|${f.source ? f.source.join('|') : ''}`; } let key: any = setQueryParamKeyFromFilterkey(f.key); From 47f371bda8a946c06f23c168cec5be7539cb5c92 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:27:45 +0100 Subject: [PATCH 078/151] fix(ui) - modal scroll issue --- frontend/app/components/ui/Modal/Modal.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frontend/app/components/ui/Modal/Modal.tsx b/frontend/app/components/ui/Modal/Modal.tsx index 89ba9f5d9..c489aa216 100644 --- a/frontend/app/components/ui/Modal/Modal.tsx +++ b/frontend/app/components/ui/Modal/Modal.tsx @@ -13,7 +13,8 @@ function Modal(props: Props) { useEffect(() => { if (open) { document.body.style.overflow = 'hidden'; - } else { + } + return () => { document.body.style.overflow = 'auto'; } }, [open]); From b563839f291b86d98ef20b05aa4773fbe9bd7d80 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:37:39 +0100 Subject: [PATCH 079/151] fix(ui) - card sessions pagination reset --- .../Dashboard/components/WidgetSessions/WidgetSessions.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index 4052e7a7e..f563d688e 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -94,7 +94,7 @@ function WidgetSessions(props: Props) { useEffect(() => { metricStore.updateKey('sessionsPage', 1); loadData(); - }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch]); + }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch, activeSeries]); useEffect(loadData, [metricStore.sessionsPage]); return ( From 2a2abc6952c5ebf5e5b323980a70a594f8b9e7bc Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Mon, 20 Feb 2023 18:46:52 +0100 Subject: [PATCH 080/151] fix(ui) - filters z-index that causing depth issue --- .../Filters/FilterValueDropdown/FilterValueDropdown.module.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css b/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css index b0ca01016..6e34010b3 100644 --- a/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css +++ b/frontend/app/components/shared/Filters/FilterValueDropdown/FilterValueDropdown.module.css @@ -6,7 +6,7 @@ align-items: center; height: 26px; width: 100%; - z-index: 3; + /* z-index: 3; TODO this has to be fixed in clickmaps @Nikita */ & .right { height: 24px; From cb517e447e91af7114737fb48910f6bfe63e6d55 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 10:33:11 +0100 Subject: [PATCH 081/151] fix(player): dont load 
devtools for clickmaps, fix scrolling overflow --- frontend/app/player/web/MessageManager.ts | 5 +++-- frontend/app/player/web/Screen/Screen.ts | 7 +++---- frontend/app/player/web/WebPlayer.ts | 2 +- frontend/app/player/web/addons/TargetMarker.ts | 2 -- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/frontend/app/player/web/MessageManager.ts b/frontend/app/player/web/MessageManager.ts index d0ae18020..68ef0cbf8 100644 --- a/frontend/app/player/web/MessageManager.ts +++ b/frontend/app/player/web/MessageManager.ts @@ -193,9 +193,9 @@ export default class MessageManager { // this.state.update({ filesLoaded: true }) } - async loadMessages() { + async loadMessages(isClickmap: boolean = false) { this.setMessagesLoading(true) - // TODO: reuseable decryptor instance + // TODO: reusable decryptor instance const createNewParser = (shouldDecrypt = true) => { const decrypt = shouldDecrypt && this.session.fileKey ? (b: Uint8Array) => decryptSessionBytes(b, this.session.fileKey) @@ -233,6 +233,7 @@ export default class MessageManager { .finally(this.onFileReadFinally); // load devtools (TODO: start after the first DOM file download) + if (isClickmap) return; this.state.update({ devtoolsLoading: true }) loadFiles(this.session.devtoolsURL, createNewParser()) // EFS fallback diff --git a/frontend/app/player/web/Screen/Screen.ts b/frontend/app/player/web/Screen/Screen.ts index b095385b1..cca56d402 100644 --- a/frontend/app/player/web/Screen/Screen.ts +++ b/frontend/app/player/web/Screen/Screen.ts @@ -213,11 +213,12 @@ export default class Screen { case ScaleMode.Embed: this.scaleRatio = Math.min(offsetWidth / width, offsetHeight / height) translate = "translate(-50%, -50%)" + posStyles = { height: height + 'px' } break; case ScaleMode.AdjustParentHeight: this.scaleRatio = offsetWidth / width translate = "translate(-50%, 0)" - posStyles = { top: 0 } + posStyles = { top: 0, height: this.document!.documentElement.getBoundingClientRect().height + 'px', } break; } @@ -232,13 +233,11 @@ export default class Screen { } Object.assign(this.screen.style, posStyles, { - height: height + 'px', width: width + 'px', transform: `scale(${this.scaleRatio}) ${translate}`, }) - Object.assign(this.iframe.style, { + Object.assign(this.iframe.style, posStyles, { width: width + 'px', - height: height + 'px', }) this.boundingRect = this.overlay.getBoundingClientRect(); diff --git a/frontend/app/player/web/WebPlayer.ts b/frontend/app/player/web/WebPlayer.ts index c4da835ff..d94d10beb 100644 --- a/frontend/app/player/web/WebPlayer.ts +++ b/frontend/app/player/web/WebPlayer.ts @@ -46,7 +46,7 @@ export default class WebPlayer extends Player { this.screen = screen this.messageManager = messageManager if (!live) { // hack. 
TODO: split OfflinePlayer class - messageManager.loadMessages() + void messageManager.loadMessages(isClickMap) } this.targetMarker = new TargetMarker(this.screen, wpState) diff --git a/frontend/app/player/web/addons/TargetMarker.ts b/frontend/app/player/web/addons/TargetMarker.ts index c9315f01b..6629ceaec 100644 --- a/frontend/app/player/web/addons/TargetMarker.ts +++ b/frontend/app/player/web/addons/TargetMarker.ts @@ -161,7 +161,6 @@ export default class TargetMarker { const scaleRatio = this.screen.getScale() Object.assign(overlay.style, clickmapStyles.overlayStyle({ height: iframeSize.height, width: iframeSize.width, scale: scaleRatio })) - console.log(selections) this.clickMapOverlay = overlay selections.forEach((s, i) => { const el = this.screen.getElementBySelector(s.selector); @@ -189,7 +188,6 @@ export default class TargetMarker { const border = document.createElement("div") - let key = 0 if (width > 50) { From e8d41bbcd3630b00b0d1c6c4247c146771f0d2ad Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 10:30:54 +0000 Subject: [PATCH 082/151] Updating parallel script --- scripts/helmcharts/build_deploy_parallel.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/scripts/helmcharts/build_deploy_parallel.sh b/scripts/helmcharts/build_deploy_parallel.sh index 38c1633bb..268811a34 100644 --- a/scripts/helmcharts/build_deploy_parallel.sh +++ b/scripts/helmcharts/build_deploy_parallel.sh @@ -8,6 +8,12 @@ set -e # Removing local alpine:latest image docker rmi alpine || true +# Signing image +# cosign sign --key awskms:///alias/openreplay-container-sign image_url:tag +export SIGN_IMAGE=1 +export PUSH_IMAGE=1 +export AWS_DEFAULT_REGION="eu-central-1" +export SIGN_KEY="awskms:///alias/openreplay-container-sign" echo $DOCKER_REPO [[ -z DOCKER_REPO ]] && { echo Set DOCKER_REPO="your docker registry" @@ -22,9 +28,9 @@ echo $DOCKER_REPO tmux split-window "cd ../../frontend && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" tmux select-layout tiled tmux split-window "cd ../../sourcemap-reader && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_alerts.sh $@" - tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_crons.sh $@" + tmux split-window "cd ../../api && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build.sh $@ + && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_alerts.sh $@ + && IMAGE_TAG=$IMAGE_TAG DOCKER_REPO=$DOCKER_REPO PUSH_IMAGE=1 bash build_crons.sh $@" tmux select-layout tiled } From 9c6cb3c23d88f3ba6f736fa47ed9a0c1a49203a9 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 11:32:54 +0100 Subject: [PATCH 083/151] chore(build): ignoring ee folder for sourcemap-reader build Signed-off-by: rjshrjndrn --- sourcemap-reader/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sourcemap-reader/build.sh b/sourcemap-reader/build.sh index 859347fd4..fbe8762e2 100644 --- a/sourcemap-reader/build.sh +++ b/sourcemap-reader/build.sh @@ -34,7 +34,7 @@ function build_api(){ tag="" # Copy enterprise code [[ $1 == "ee" ]] && { - cp -rf ../ee/sourcemap-reader/* ./ + cp -rf ../ee/sourcemap-reader/* ./ || true # We share same codebase for ee/foss 
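+ # ('|| true' swallows a cp failure here, e.g. an absent or empty ee/ tree in
+ # FOSS-only checkouts, so the shared build script keeps going)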
envarg="default-ee" tag="ee-" } From bd10225364def4ab089c89b95d224ad6e28fd7a9 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Tue, 21 Feb 2023 11:39:23 +0100 Subject: [PATCH 084/151] fix(ui) - widget sessions pagination --- .../WidgetSessions/WidgetSessions.tsx | 323 ++++++++++-------- 1 file changed, 173 insertions(+), 150 deletions(-) diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index f563d688e..9535ab976 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -10,168 +10,191 @@ import { debounce } from 'App/utils'; import useIsMounted from 'App/hooks/useIsMounted'; import AnimatedSVG, { ICONS } from 'Shared/AnimatedSVG/AnimatedSVG'; import { numberWithCommas } from 'App/utils'; -import { CLICKMAP } from "App/constants/card"; +import { CLICKMAP } from 'App/constants/card'; interface Props { - className?: string; + className?: string; } function WidgetSessions(props: Props) { - const { className = '' } = props; - const [activeSeries, setActiveSeries] = useState('all'); - const [data, setData] = useState([]); - const isMounted = useIsMounted(); - const [loading, setLoading] = useState(false); - const filteredSessions = getListSessionsBySeries(data, activeSeries); - const { dashboardStore, metricStore, sessionStore } = useStore(); - const filter = dashboardStore.drillDownFilter; - const widget = metricStore.instance; - const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); + const { className = '' } = props; + const [activeSeries, setActiveSeries] = useState('all'); + const [data, setData] = useState([]); + const isMounted = useIsMounted(); + const [loading, setLoading] = useState(false); + const filteredSessions = getListSessionsBySeries(data, activeSeries); + const { dashboardStore, metricStore, sessionStore } = useStore(); + const filter = dashboardStore.drillDownFilter; + const widget = metricStore.instance; + const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); - const writeOption = ({ value }: any) => setActiveSeries(value.value); - useEffect(() => { - if (!data) return; - const seriesOptions = data.map((item: any) => ({ - label: item.seriesName, - value: item.seriesId, - })); - setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); - }, [data]); + const writeOption = ({ value }: any) => setActiveSeries(value.value); + useEffect(() => { + if (!data) return; + const seriesOptions = data.map((item: any) => ({ + label: item.seriesName, + value: item.seriesId, + })); + setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); + }, [data]); - const fetchSessions = (metricId: any, filter: any) => { - if (!isMounted()) return; - setLoading(true); - delete filter.eventsOrderSupport; - widget - .fetchSessions(metricId, filter) - .then((res: any) => { - setData(res); - }) - .finally(() => { - setLoading(false); - }); - }; - const fetchClickmapSessions = (customFilters: Record) => { - 
sessionStore.getSessions(customFilters) - .then(data => { - setData([{ ...data, seriesId: 1 , seriesName: "Clicks" }]) - }) + const fetchSessions = (metricId: any, filter: any) => { + if (!isMounted()) return; + setLoading(true); + delete filter.eventsOrderSupport; + widget + .fetchSessions(metricId, filter) + .then((res: any) => { + setData(res); + }) + .finally(() => { + setLoading(false); + }); + }; + const fetchClickmapSessions = (customFilters: Record) => { + sessionStore.getSessions(customFilters).then((data) => { + setData([{ ...data, seriesId: 1, seriesName: 'Clicks' }]); + }); + }; + const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); + const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []); + + const depsString = JSON.stringify(widget.series); + + const loadData = () => { + if (widget.metricType === CLICKMAP && metricStore.clickMapSearch) { + const clickFilter = { + value: [metricStore.clickMapSearch], + type: 'CLICK', + operator: 'onSelector', + isEvent: true, + // @ts-ignore + filters: [], + }; + const timeRange = { + rangeValue: dashboardStore.drillDownPeriod.rangeValue, + startDate: dashboardStore.drillDownPeriod.start, + endDate: dashboardStore.drillDownPeriod.end, + }; + const customFilter = { + ...filter, + ...timeRange, + filters: [...sessionStore.userFilter.filters, clickFilter], + }; + debounceClickMapSearch(customFilter); + } else { + debounceRequest(widget.metricId, { + ...filter, + series: widget.series.map((s) => s.toJson()), + page: metricStore.sessionsPage, + limit: metricStore.sessionsPageSize, + }); } - const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); - const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []) + }; + useEffect(() => { + metricStore.updateKey('sessionsPage', 1); + loadData(); + }, [ + filter.startTimestamp, + filter.endTimestamp, + filter.filters, + depsString, + metricStore.clickMapSearch, + activeSeries, + ]); + useEffect(loadData, [metricStore.sessionsPage]); - const depsString = JSON.stringify(widget.series); - - const loadData = () => { - if (widget.metricType === CLICKMAP && metricStore.clickMapSearch) { - const clickFilter = { - value: [ - metricStore.clickMapSearch - ], - type: "CLICK", - operator: "onSelector", - isEvent: true, - // @ts-ignore - "filters": [] - } - const timeRange = { - rangeValue: dashboardStore.drillDownPeriod.rangeValue, - startDate: dashboardStore.drillDownPeriod.start, - endDate: dashboardStore.drillDownPeriod.end, - } - const customFilter = { ...filter, ...timeRange, filters: [ ...sessionStore.userFilter.filters, clickFilter]} - debounceClickMapSearch(customFilter) - } else { - debounceRequest(widget.metricId, { - ...filter, - series: widget.series.map(s => s.toJson()), - page: metricStore.sessionsPage, - limit: metricStore.sessionsPageSize, - }); - } - } - useEffect(() => { - metricStore.updateKey('sessionsPage', 1); - loadData(); - }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch, activeSeries]); - useEffect(loadData, [metricStore.sessionsPage]); - - return ( -
[JSX markup for this hunk was lost in extraction; both the removed and the re-indented render blocks are garbled. Recoverable content: a header reading "Clicks" or "Sessions" (suffixed with `on "${metricStore.clickMapLabel}"` when set) between {startTime} and {endTime}; a "Filter by Series" select wired to writeOption and hidden for 'table' and CLICKMAP cards; a Loader/NoContent wrapper that shows "No relevant sessions found for the selected time period." when filteredSessions is empty and otherwise renders one SessionItem per session; and a footer "Showing A to B of {numberWithCommas(filteredSessions.total)} sessions." with a Pagination driven by metricStore.updateKey('sessionsPage', page), limit metricStore.sessionsPageSize, debounceRequest 500.]
+ ); } const getListSessionsBySeries = (data: any, seriesId: any) => { - const arr: any = { sessions: [], total: 0 }; - data.forEach((element: any) => { - if (seriesId === 'all') { - const sessionIds = arr.sessions.map((i: any) => i.sessionId); - arr.sessions.push(...element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId))); - arr.total = element.total; - } else { - if (element.seriesId === seriesId) { - arr.sessions.push(...element.sessions); - arr.total = element.total; - } - } - }); - return arr; + const arr = data.reduce( + (arr: any, element: any) => { + if (seriesId === 'all') { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + arr.sessions.push(...sessions); + } else if (element.seriesId === seriesId) { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + const duplicates = element.sessions.length - sessions.length; + arr.sessions.push(...sessions); + arr.total = element.total - duplicates; + } + return arr; + }, + { sessions: [] } + ); + arr.total = seriesId === 'all' ? Math.max(...data.map((i: any) => i.total)) : data.find((i: any) => i.seriesId === seriesId).total; + return arr; }; export default observer(WidgetSessions); From 30efc6cfe522d5357a681c2e037a2a814abd539b Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 11:55:45 +0100 Subject: [PATCH 085/151] chore(helm): 2 kafka replicas Signed-off-by: rjshrjndrn --- scripts/helmcharts/databases/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/helmcharts/databases/values.yaml b/scripts/helmcharts/databases/values.yaml index 80cdba4e3..1ed77adde 100644 --- a/scripts/helmcharts/databases/values.yaml +++ b/scripts/helmcharts/databases/values.yaml @@ -132,6 +132,7 @@ kafka: tag: 2.8.1 fullnameOverride: kafka enabled: false + replicaCount: 2 # Enterprise dbs From bf4689e171b035c805f4e28cd4d30f0ee0b62cd2 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 12:03:05 +0100 Subject: [PATCH 086/151] fix(ui): fix alert change value --- .../Dashboard/components/Alerts/AlertForm/Condition.tsx | 1 + frontend/app/mstore/alertsStore.ts | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx index 5039cc1dd..ba6956323 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx @@ -2,6 +2,7 @@ import React from 'react'; import { Input } from 'UI'; import Select from 'Shared/Select'; import { alertConditions as conditions } from 'App/constants'; +import Alert from 'Types/alert' const thresholdOptions = [ { label: '15 minutes', value: 15 }, diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index e608c1873..a2d155ffc 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -20,7 +20,9 @@ export default class AlertsStore { this.page = 1; } + // TODO: remove it updateKey(key: string, value: any) { + // @ts-ignore this[key] = value } @@ -77,10 +79,10 @@ export default class AlertsStore { edit = (diff: Partial) => { const key = Object.keys(diff)[0] - const oldInst = this.instance + const oldInst = { ...this.instance } // @ts-ignore 
oldInst[key] = diff[key] - this.instance = oldInst + this.instance = new Alert(oldInst, !!oldInst.alertId) } } From e5cf98362e4d5fd55e39ae573544f842c635540c Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 21 Feb 2023 14:56:59 +0100 Subject: [PATCH 087/151] feat(chalice): fixed update alerts --- api/chalicelib/utils/helper.py | 1 + api/schemas.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/api/chalicelib/utils/helper.py b/api/chalicelib/utils/helper.py index 85e34ec80..369aff40a 100644 --- a/api/chalicelib/utils/helper.py +++ b/api/chalicelib/utils/helper.py @@ -283,6 +283,7 @@ def custom_alert_to_front(values): # to support frontend format for payload if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom: values["query"]["left"] = values["seriesId"] + values["seriesId"] = None return values diff --git a/api/schemas.py b/api/schemas.py index ab057426a..dabeea83d 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -363,9 +363,11 @@ class AlertSchema(BaseModel): @root_validator(pre=True) def transform_alert(cls, values): - if values.get("seriesId") is None and isinstance(values["query"]["left"], int): + if isinstance(values["query"]["left"], int): values["seriesId"] = values["query"]["left"] values["query"]["left"] = AlertColumn.custom + else: + values["seriesId"] = None return values From 2448c474766797c920ac04c0d913fd14a46d374e Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Tue, 21 Feb 2023 11:39:23 +0100 Subject: [PATCH 088/151] fix(ui) - widget sessions pagination --- .../WidgetSessions/WidgetSessions.tsx | 323 ++++++++++-------- 1 file changed, 173 insertions(+), 150 deletions(-) diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index f563d688e..9535ab976 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -10,168 +10,191 @@ import { debounce } from 'App/utils'; import useIsMounted from 'App/hooks/useIsMounted'; import AnimatedSVG, { ICONS } from 'Shared/AnimatedSVG/AnimatedSVG'; import { numberWithCommas } from 'App/utils'; -import { CLICKMAP } from "App/constants/card"; +import { CLICKMAP } from 'App/constants/card'; interface Props { - className?: string; + className?: string; } function WidgetSessions(props: Props) { - const { className = '' } = props; - const [activeSeries, setActiveSeries] = useState('all'); - const [data, setData] = useState([]); - const isMounted = useIsMounted(); - const [loading, setLoading] = useState(false); - const filteredSessions = getListSessionsBySeries(data, activeSeries); - const { dashboardStore, metricStore, sessionStore } = useStore(); - const filter = dashboardStore.drillDownFilter; - const widget = metricStore.instance; - const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); - const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); + const { className = '' } = props; + const [activeSeries, setActiveSeries] = useState('all'); + const [data, setData] = useState([]); + const isMounted = useIsMounted(); + const [loading, setLoading] = useState(false); + const filteredSessions = getListSessionsBySeries(data, activeSeries); + const { dashboardStore, metricStore, 
sessionStore } = useStore(); + const filter = dashboardStore.drillDownFilter; + const widget = metricStore.instance; + const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); + const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); - const writeOption = ({ value }: any) => setActiveSeries(value.value); - useEffect(() => { - if (!data) return; - const seriesOptions = data.map((item: any) => ({ - label: item.seriesName, - value: item.seriesId, - })); - setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); - }, [data]); + const writeOption = ({ value }: any) => setActiveSeries(value.value); + useEffect(() => { + if (!data) return; + const seriesOptions = data.map((item: any) => ({ + label: item.seriesName, + value: item.seriesId, + })); + setSeriesOptions([{ label: 'All', value: 'all' }, ...seriesOptions]); + }, [data]); - const fetchSessions = (metricId: any, filter: any) => { - if (!isMounted()) return; - setLoading(true); - delete filter.eventsOrderSupport; - widget - .fetchSessions(metricId, filter) - .then((res: any) => { - setData(res); - }) - .finally(() => { - setLoading(false); - }); - }; - const fetchClickmapSessions = (customFilters: Record) => { - sessionStore.getSessions(customFilters) - .then(data => { - setData([{ ...data, seriesId: 1 , seriesName: "Clicks" }]) - }) + const fetchSessions = (metricId: any, filter: any) => { + if (!isMounted()) return; + setLoading(true); + delete filter.eventsOrderSupport; + widget + .fetchSessions(metricId, filter) + .then((res: any) => { + setData(res); + }) + .finally(() => { + setLoading(false); + }); + }; + const fetchClickmapSessions = (customFilters: Record) => { + sessionStore.getSessions(customFilters).then((data) => { + setData([{ ...data, seriesId: 1, seriesName: 'Clicks' }]); + }); + }; + const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); + const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []); + + const depsString = JSON.stringify(widget.series); + + const loadData = () => { + if (widget.metricType === CLICKMAP && metricStore.clickMapSearch) { + const clickFilter = { + value: [metricStore.clickMapSearch], + type: 'CLICK', + operator: 'onSelector', + isEvent: true, + // @ts-ignore + filters: [], + }; + const timeRange = { + rangeValue: dashboardStore.drillDownPeriod.rangeValue, + startDate: dashboardStore.drillDownPeriod.start, + endDate: dashboardStore.drillDownPeriod.end, + }; + const customFilter = { + ...filter, + ...timeRange, + filters: [...sessionStore.userFilter.filters, clickFilter], + }; + debounceClickMapSearch(customFilter); + } else { + debounceRequest(widget.metricId, { + ...filter, + series: widget.series.map((s) => s.toJson()), + page: metricStore.sessionsPage, + limit: metricStore.sessionsPageSize, + }); } - const debounceRequest: any = React.useCallback(debounce(fetchSessions, 1000), []); - const debounceClickMapSearch = React.useCallback(debounce(fetchClickmapSessions, 1000), []) + }; + useEffect(() => { + metricStore.updateKey('sessionsPage', 1); + loadData(); + }, [ + filter.startTimestamp, + filter.endTimestamp, + filter.filters, + depsString, + metricStore.clickMapSearch, + activeSeries, + ]); + useEffect(loadData, [metricStore.sessionsPage]); - const depsString = JSON.stringify(widget.series); - - const loadData = () => { - if (widget.metricType === CLICKMAP && 
metricStore.clickMapSearch) { - const clickFilter = { - value: [ - metricStore.clickMapSearch - ], - type: "CLICK", - operator: "onSelector", - isEvent: true, - // @ts-ignore - "filters": [] - } - const timeRange = { - rangeValue: dashboardStore.drillDownPeriod.rangeValue, - startDate: dashboardStore.drillDownPeriod.start, - endDate: dashboardStore.drillDownPeriod.end, - } - const customFilter = { ...filter, ...timeRange, filters: [ ...sessionStore.userFilter.filters, clickFilter]} - debounceClickMapSearch(customFilter) - } else { - debounceRequest(widget.metricId, { - ...filter, - series: widget.series.map(s => s.toJson()), - page: metricStore.sessionsPage, - limit: metricStore.sessionsPageSize, - }); - } - } - useEffect(() => { - metricStore.updateKey('sessionsPage', 1); - loadData(); - }, [filter.startTimestamp, filter.endTimestamp, filter.filters, depsString, metricStore.clickMapSearch, activeSeries]); - useEffect(loadData, [metricStore.sessionsPage]); - - return ( -
-
-
-

{metricStore.clickMapSearch ? 'Clicks' : 'Sessions'}

-
- {metricStore.clickMapLabel ? `on "${metricStore.clickMapLabel}" ` : null} - between {startTime} and{' '} - {endTime}{' '} -
-
- - {widget.metricType !== 'table' && widget.metricType !== CLICKMAP && ( -
- Filter by Series - +
+ )} +
+ +
+ + + +
+
+ No relevant sessions found for the selected time period. +
+
+ } + show={filteredSessions.sessions.length === 0} + > + {filteredSessions.sessions.map((session: any) => ( + + +
+ + ))} + +
+
+ Showing{' '} + + {(metricStore.sessionsPage - 1) * metricStore.sessionsPageSize + 1} + {' '} + to{' '} + + {(metricStore.sessionsPage - 1) * metricStore.sessionsPageSize + + filteredSessions.sessions.length} + {' '} + of {numberWithCommas(filteredSessions.total)}{' '} + sessions. +
+ metricStore.updateKey('sessionsPage', page)} + limit={metricStore.sessionsPageSize} + debounceRequest={500} + /> +
+ + +
+
+ ); } const getListSessionsBySeries = (data: any, seriesId: any) => { - const arr: any = { sessions: [], total: 0 }; - data.forEach((element: any) => { - if (seriesId === 'all') { - const sessionIds = arr.sessions.map((i: any) => i.sessionId); - arr.sessions.push(...element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId))); - arr.total = element.total; - } else { - if (element.seriesId === seriesId) { - arr.sessions.push(...element.sessions); - arr.total = element.total; - } - } - }); - return arr; + const arr = data.reduce( + (arr: any, element: any) => { + if (seriesId === 'all') { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + arr.sessions.push(...sessions); + } else if (element.seriesId === seriesId) { + const sessionIds = arr.sessions.map((i: any) => i.sessionId); + const sessions = element.sessions.filter((i: any) => !sessionIds.includes(i.sessionId)); + const duplicates = element.sessions.length - sessions.length; + arr.sessions.push(...sessions); + arr.total = element.total - duplicates; + } + return arr; + }, + { sessions: [] } + ); + arr.total = seriesId === 'all' ? Math.max(...data.map((i: any) => i.total)) : data.find((i: any) => i.seriesId === seriesId).total; + return arr; }; export default observer(WidgetSessions); From 75f0e103bc27b37e48ca06fcf52b8fc886ffd6bd Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Tue, 21 Feb 2023 11:55:45 +0100 Subject: [PATCH 089/151] chore(helm): 2 kafka replicas Signed-off-by: rjshrjndrn --- scripts/helmcharts/databases/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/helmcharts/databases/values.yaml b/scripts/helmcharts/databases/values.yaml index 80cdba4e3..1ed77adde 100644 --- a/scripts/helmcharts/databases/values.yaml +++ b/scripts/helmcharts/databases/values.yaml @@ -132,6 +132,7 @@ kafka: tag: 2.8.1 fullnameOverride: kafka enabled: false + replicaCount: 2 # Enterprise dbs From 9e1f2444802aff0e0d2da687b530769e8affed4c Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 12:03:05 +0100 Subject: [PATCH 090/151] fix(ui): fix alert change value --- .../Dashboard/components/Alerts/AlertForm/Condition.tsx | 1 + frontend/app/mstore/alertsStore.ts | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx index 5039cc1dd..ba6956323 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx @@ -2,6 +2,7 @@ import React from 'react'; import { Input } from 'UI'; import Select from 'Shared/Select'; import { alertConditions as conditions } from 'App/constants'; +import Alert from 'Types/alert' const thresholdOptions = [ { label: '15 minutes', value: 15 }, diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index e608c1873..a2d155ffc 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -20,7 +20,9 @@ export default class AlertsStore { this.page = 1; } + // TODO: remove it updateKey(key: string, value: any) { + // @ts-ignore this[key] = value } @@ -77,10 +79,10 @@ export default class AlertsStore { edit = (diff: Partial) => { const key = Object.keys(diff)[0] - const oldInst = this.instance + const oldInst = { ...this.instance } // @ts-ignore 
oldInst[key] = diff[key]
-    this.instance = oldInst
+    this.instance = new Alert(oldInst, !!oldInst.alertId)
   }
 }

From 9a1d456add7bd4dbb91ae4e9d2aeb8685f7bbee9 Mon Sep 17 00:00:00 2001
From: Shekar Siri
Date: Tue, 21 Feb 2023 15:12:48 +0100
Subject: [PATCH 091/151] change(ui) - widget sessions clear filters

---
 .../WidgetSessions/WidgetSessions.tsx         | 28 +++++++++++++------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx
index 9535ab976..3720dd94b 100644
--- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx
+++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx
@@ -1,5 +1,5 @@
 import React, { useEffect, useState } from 'react';
-import { NoContent, Loader, Pagination } from 'UI';
+import { NoContent, Loader, Pagination, Button } from 'UI';
 import Select from 'Shared/Select';
 import cn from 'classnames';
 import { useStore } from 'App/mstore';
@@ -28,6 +28,7 @@ function WidgetSessions(props: Props) {
   const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm');
   const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm');
   const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]);
+  const hasFilters = filter.filters.length > 0 || (filter.startTimestamp !== dashboardStore.drillDownPeriod.start || filter.endTimestamp !== dashboardStore.drillDownPeriod.end);

   const writeOption = ({ value }: any) => setActiveSeries(value.value);
   useEffect(() => {
@@ -105,6 +106,11 @@ function WidgetSessions(props: Props) {
   ]);
   useEffect(loadData, [metricStore.sessionsPage]);

+  const clearFilters = () => {
+    metricStore.updateKey('sessionsPage', 1);
+    dashboardStore.resetDrillDownFilter();
+  }
+
   return (
@@ -117,12 +123,15 @@ function WidgetSessions(props: Props) {
- {widget.metricType !== 'table' && widget.metricType !== CLICKMAP && ( -
- Filter by Series - +
+ )} +
@@ -193,7 +202,10 @@ const getListSessionsBySeries = (data: any, seriesId: any) => { }, { sessions: [] } ); - arr.total = seriesId === 'all' ? Math.max(...data.map((i: any) => i.total)) : data.find((i: any) => i.seriesId === seriesId).total; + arr.total = + seriesId === 'all' + ? Math.max(...data.map((i: any) => i.total)) + : data.find((i: any) => i.seriesId === seriesId).total; return arr; }; From 65f6d403df4300aaa05c81c091d3be958302c730 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 21 Feb 2023 15:15:44 +0100 Subject: [PATCH 092/151] feat(chalice): fixed update alerts --- api/schemas.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api/schemas.py b/api/schemas.py index dabeea83d..5cae3a31a 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -363,11 +363,10 @@ class AlertSchema(BaseModel): @root_validator(pre=True) def transform_alert(cls, values): + values["seriesId"] = None if isinstance(values["query"]["left"], int): values["seriesId"] = values["query"]["left"] values["query"]["left"] = AlertColumn.custom - else: - values["seriesId"] = None return values From 61eb1c1fd7bf92fe5d9c4f8208e6940d050e7057 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Tue, 21 Feb 2023 15:12:48 +0100 Subject: [PATCH 093/151] change(ui) - widget sessions clear filters --- .../WidgetSessions/WidgetSessions.tsx | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx index 9535ab976..3720dd94b 100644 --- a/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx +++ b/frontend/app/components/Dashboard/components/WidgetSessions/WidgetSessions.tsx @@ -1,5 +1,5 @@ import React, { useEffect, useState } from 'react'; -import { NoContent, Loader, Pagination } from 'UI'; +import { NoContent, Loader, Pagination, Button } from 'UI'; import Select from 'Shared/Select'; import cn from 'classnames'; import { useStore } from 'App/mstore'; @@ -28,6 +28,7 @@ function WidgetSessions(props: Props) { const startTime = DateTime.fromMillis(filter.startTimestamp).toFormat('LLL dd, yyyy HH:mm'); const endTime = DateTime.fromMillis(filter.endTimestamp).toFormat('LLL dd, yyyy HH:mm'); const [seriesOptions, setSeriesOptions] = useState([{ label: 'All', value: 'all' }]); + const hasFilters = filter.filters.length > 0 || (filter.startTimestamp !== dashboardStore.drillDownPeriod.start || filter.endTimestamp !== dashboardStore.drillDownPeriod.end); const writeOption = ({ value }: any) => setActiveSeries(value.value); useEffect(() => { @@ -105,6 +106,11 @@ function WidgetSessions(props: Props) { ]); useEffect(loadData, [metricStore.sessionsPage]); + const clearFilters = () => { + metricStore.updateKey('sessionsPage', 1); + dashboardStore.resetDrillDownFilter(); + } + return (
@@ -117,12 +123,15 @@ function WidgetSessions(props: Props) {
- {widget.metricType !== 'table' && widget.metricType !== CLICKMAP && ( -
- Filter by Series - +
+ )} +
@@ -193,7 +202,10 @@ const getListSessionsBySeries = (data: any, seriesId: any) => { }, { sessions: [] } ); - arr.total = seriesId === 'all' ? Math.max(...data.map((i: any) => i.total)) : data.find((i: any) => i.seriesId === seriesId).total; + arr.total = + seriesId === 'all' + ? Math.max(...data.map((i: any) => i.total)) + : data.find((i: any) => i.seriesId === seriesId).total; return arr; }; From bc227dc450f54ee2359af3ee9ed099e59e38431f Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 15:19:16 +0100 Subject: [PATCH 094/151] fix(ui): fix alert trigger name --- .../components/Alerts/AlertListItem.tsx | 42 ++++++++++++------- .../Dashboard/components/Alerts/NewAlert.tsx | 8 +++- 2 files changed, 34 insertions(+), 16 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index 024cc734c..071dd204c 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -42,9 +42,8 @@ const getNotifyChannel = (alert: Record, webhooks: Array) => { ' (' + alert.msteamsInput .map((channelId: number) => { - return ( - webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams')?.name - ); + return webhooks.find((hook) => hook.webhookId === channelId && hook.type === 'msteams') + ?.name; }) .join(', ') + ')' @@ -58,7 +57,7 @@ const getNotifyChannel = (alert: Record, webhooks: Array) => { } } if (alert.msteams) { - str += (str === '' ? '' : ' and ') + 'MS Teams' + str += (str === '' ? '' : ' and ') + 'MS Teams'; if (alert.msteamsInput.length > 0) { str += getMsTeamsChannels(); } @@ -79,10 +78,11 @@ interface Props extends RouteComponentProps { init: (alert: Alert) => void; demo?: boolean; webhooks: Array; + triggerOptions: Record; } function AlertListItem(props: Props) { - const { alert, siteId, history, init, demo, webhooks } = props; + const { alert, siteId, history, init, demo, webhooks, triggerOptions } = props; if (!alert) { return null; @@ -95,6 +95,11 @@ function AlertListItem(props: Props) { history.push(path); }; + const formTriggerName = () => + Number.isInteger(alert.query.left) && triggerOptions + ? triggerOptions.find((opt: { value: any, label: string }) => opt.value === alert.query.left).label + : alert.query.left; + return (
{'When the '} - {alert.detectionMethod} + + {alert.detectionMethod} + {' of '} - {alert.seriesName || alert.query.left} + + {triggerOptions ? formTriggerName() : alert.seriesName} + {' is '} {alert.query.operator} - {numberWithCommas(alert.query.right)}{alert.change === 'percent' ? '%' : alert.metric?.unit} + {numberWithCommas(alert.query.right)} + {alert.change === 'percent' ? '%' : alert.metric?.unit} {' over the past '} - {getThreshold( - alert.currentPeriod)} + + {getThreshold(alert.currentPeriod)} + {alert.detectionMethod === 'change' ? ( <> {' compared to the previous '} - {getThreshold( - alert.previousPeriod)} + + {getThreshold(alert.previousPeriod)} + ) : null} {', notify me on '} diff --git a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx index 717c7ea59..67a6bb459 100644 --- a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx @@ -278,7 +278,13 @@ const NewAlert = (props: IProps) => {
{instance && ( - null} webhooks={webhooks} /> + null} + webhooks={webhooks} /> )}
From 4bdc095c76a1c421ae9cb04ef186f3ba4d0a7277 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Tue, 21 Feb 2023 15:29:24 +0100 Subject: [PATCH 095/151] fix(backend): removed wrong defer in db.Saver --- ee/backend/internal/db/datasaver/saver.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ee/backend/internal/db/datasaver/saver.go b/ee/backend/internal/db/datasaver/saver.go index 76057309d..e05e502f1 100644 --- a/ee/backend/internal/db/datasaver/saver.go +++ b/ee/backend/internal/db/datasaver/saver.go @@ -19,7 +19,6 @@ func New(pg *cache.PGCache, cfg *db.Config) *Saver { var producer types.Producer = nil if cfg.UseQuickwit { producer = queue.NewProducer(cfg.MessageSizeLimit, true) - defer producer.Close(15000) } return &Saver{pg: pg, producer: producer, topic: cfg.QuickwitTopic} } From 56c14692733aa0b02d638674eaaa6dd7e81a2ce1 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Tue, 21 Feb 2023 15:51:03 +0100 Subject: [PATCH 096/151] fix(backend): removed debug log from session iterator --- backend/pkg/messages/session-iterator.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/backend/pkg/messages/session-iterator.go b/backend/pkg/messages/session-iterator.go index eb9f32387..45daae4b8 100644 --- a/backend/pkg/messages/session-iterator.go +++ b/backend/pkg/messages/session-iterator.go @@ -40,13 +40,6 @@ func SplitMessages(data []byte) ([]*msgInfo, error) { return nil, fmt.Errorf("read message type err: %s", err) } - if msgType == MsgRedux { - log.Printf("redux") - } - if msgType == MsgFetch { - log.Printf("fetch") - } - // Read message body _, err = ReadMessage(msgType, reader) if err != nil { From f01ff51d3ffea0d6752247285bcd4334d3dca3a4 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 15:46:21 +0100 Subject: [PATCH 097/151] change(tracker): tracker v4.9.10; assist v.4.1.6 --- tracker/tracker-assist/CHANGELOG.md | 5 +++++ tracker/tracker-assist/package.json | 2 +- tracker/tracker/package.json | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tracker/tracker-assist/CHANGELOG.md b/tracker/tracker-assist/CHANGELOG.md index 5afe0ef96..7ce338e9b 100644 --- a/tracker/tracker-assist/CHANGELOG.md +++ b/tracker/tracker-assist/CHANGELOG.md @@ -1,6 +1,11 @@ +## 4.1.6 + +- fix recording state import + ## 4.1.5 - fixed peerjs hack that caused ts compile issues +- - added screen recording feature (EE) license ## 4.1.4 diff --git a/tracker/tracker-assist/package.json b/tracker/tracker-assist/package.json index 7e64ceab8..8b9dcd8ef 100644 --- a/tracker/tracker-assist/package.json +++ b/tracker/tracker-assist/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-assist", "description": "Tracker plugin for screen assistance through the WebRTC", - "version": "4.1.5", + "version": "4.1.6", "keywords": [ "WebRTC", "assistance", diff --git a/tracker/tracker/package.json b/tracker/tracker/package.json index 8e65c3f4c..5682ffc1c 100644 --- a/tracker/tracker/package.json +++ b/tracker/tracker/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker", "description": "The OpenReplay tracker main package", - "version": "4.1.9-beta.4", + "version": "4.1.10", "keywords": [ "logging", "replay" From c6bed2fc0a1ab0947cd0d2b65bad97e62b1cd58a Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Tue, 21 Feb 2023 16:11:15 +0100 Subject: [PATCH 098/151] fix(player): fix live player on player ready --- .../Session/Player/LivePlayer/LivePlayerInst.tsx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git 
a/frontend/app/components/Session/Player/LivePlayer/LivePlayerInst.tsx b/frontend/app/components/Session/Player/LivePlayer/LivePlayerInst.tsx index 2472b6547..c17007648 100644 --- a/frontend/app/components/Session/Player/LivePlayer/LivePlayerInst.tsx +++ b/frontend/app/components/Session/Player/LivePlayer/LivePlayerInst.tsx @@ -4,7 +4,7 @@ import { findDOMNode } from 'react-dom'; import cn from 'classnames'; import LiveControls from './LiveControls'; import ConsolePanel from 'Shared/DevTools/ConsolePanel'; - +import { observer } from 'mobx-react-lite' import Overlay from './Overlay'; import stl from 'Components/Session_/Player/player.module.css'; import { PlayerContext, ILivePlayerContext } from 'App/components/Session/playerContext'; @@ -27,7 +27,9 @@ function Player(props: IProps) { // @ts-ignore TODO const playerContext = React.useContext(PlayerContext); const screenWrapper = React.useRef(null); + const ready = playerContext.store.get().ready + console.log(ready) React.useEffect(() => { if (!props.closedLive || isMultiview) { const parentElement = findDOMNode(screenWrapper.current) as HTMLDivElement | null; //TODO: good architecture @@ -40,7 +42,7 @@ function Player(props: IProps) { React.useEffect(() => { playerContext.player.scale(); - }, [playerContext.player]); + }, [playerContext.player, ready]); if (!playerContext.player) return null; @@ -78,4 +80,4 @@ export default connect( (isAssist && !state.getIn(['sessions', 'current']).live), }; } -)(Player); +)(observer(Player)); From 601c460dc84c6687ec7c32931d4f6109a51c3bc3 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Tue, 21 Feb 2023 16:11:38 +0100 Subject: [PATCH 099/151] change(ui) - enterprise text --- frontend/app/components/Assist/Assist.tsx | 3 ++- .../Dashboard/components/DashboardOptions/DashboardOptions.tsx | 3 ++- .../Dashboard/components/MetricTypeItem/MetricTypeItem.tsx | 3 ++- .../Dashboard/components/MetricTypeList/MetricTypeList.tsx | 3 ++- frontend/app/components/Login/Login.js | 3 ++- .../app/components/Session_/ScreenRecorder/ScreenRecorder.tsx | 3 ++- .../shared/CustomDropdownOption/CustomDropdownOption.tsx | 3 ++- frontend/app/constants/index.js | 3 ++- 8 files changed, 16 insertions(+), 8 deletions(-) diff --git a/frontend/app/components/Assist/Assist.tsx b/frontend/app/components/Assist/Assist.tsx index abb1403a9..b58df1352 100644 --- a/frontend/app/components/Assist/Assist.tsx +++ b/frontend/app/components/Assist/Assist.tsx @@ -6,6 +6,7 @@ import AssistRouter from './AssistRouter'; import { SideMenuitem } from 'UI'; import { withSiteId, assist, recordings } from 'App/routes'; import { connect } from 'react-redux'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; interface Props extends RouteComponentProps { siteId: string; @@ -40,7 +41,7 @@ function Assist(props: Props) { iconName="record-circle" onClick={() => redirect(recordings())} disabled={!isEnterprise} - tooltipTitle="This feature requires an enterprise license." + tooltipTitle={ENTERPRISE_REQUEIRED} />
diff --git a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx index 62cfd9404..b006dbb22 100644 --- a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx +++ b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx @@ -1,6 +1,7 @@ import React from 'react'; import { ItemMenu } from 'UI'; import { connect } from 'react-redux'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; interface Props { editHandler: (isTitle: boolean) => void; @@ -16,7 +17,7 @@ function DashboardOptions(props: Props) { { icon: 'text-paragraph', text: `${!isTitlePresent ? 'Add' : 'Edit'} Description`, onClick: () => editHandler(false) }, { icon: 'users', text: 'Visibility & Access', onClick: editHandler }, { icon: 'trash', text: 'Delete', onClick: deleteHandler }, - { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: 'This feature requires an enterprise license.' } + { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: {ENTERPRISE_REQUEIRED} } ] return ( diff --git a/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx b/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx index 0d5fc4c89..c28389c4a 100644 --- a/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx +++ b/frontend/app/components/Dashboard/components/MetricTypeItem/MetricTypeItem.tsx @@ -2,6 +2,7 @@ import { IconNames } from 'App/components/ui/SVG'; import React from 'react'; import { Icon, Tooltip } from 'UI'; import cn from 'classnames'; +import { ENTERPRISE_REQUEIRED } from 'App/constants'; export interface MetricType { title: string; @@ -23,7 +24,7 @@ function MetricTypeItem(props: Props) { onClick = () => {}, } = props; return ( - +
{authDetails.edition === 'ee' ? "SSO has not been configured. Please reach out to your admin." : "This feature requires an enterprise license."}
} + title={
{authDetails.edition === 'ee' ? "SSO has not been configured. Please reach out to your admin." : ENTERPRISE_REQUEIRED}
} placement="top" > - {webhook.exists() && } -
- {webhook.exists() && - } -
- +
+
+ + {webhook.exists() && } +
+ {webhook.exists() && ( + + )}
- ); + +
+ ); } export default observer(WebhookForm); diff --git a/frontend/app/mstore/settingsStore.ts b/frontend/app/mstore/settingsStore.ts index 7dd584fd3..c31071694 100644 --- a/frontend/app/mstore/settingsStore.ts +++ b/frontend/app/mstore/settingsStore.ts @@ -6,7 +6,6 @@ import Webhook, { IWebhook } from 'Types/webhook'; import { webhookService } from 'App/services'; -import Alert, { IAlert } from "Types/alert"; export default class SettingsStore { loadingCaptureRate: boolean = false; @@ -73,8 +72,11 @@ export default class SettingsStore { this.webhookInst = new Webhook(data) if (inst.webhookId === undefined) this.setWebhooks([...this.webhooks, this.webhookInst]) else this.setWebhooks([...this.webhooks.filter(hook => hook.webhookId !== data.webhookId), this.webhookInst]) - this.hooksLoading = false + }) + .finally(() => { + this.hooksLoading = false + }) } setWebhooks = (webhooks: Webhook[]) => { diff --git a/frontend/app/services/WebhookService.ts b/frontend/app/services/WebhookService.ts index 2bcefa619..7b1073867 100644 --- a/frontend/app/services/WebhookService.ts +++ b/frontend/app/services/WebhookService.ts @@ -6,20 +6,17 @@ export default class WebhookService extends BaseService { return this.client.get('/webhooks') .then(r => r.json()) .then(j => j.data || []) - .catch(Promise.reject) } saveWebhook(inst: Webhook) { return this.client.put('/webhooks', inst) .then(r => r.json()) .then(j => j.data || {}) - .catch(Promise.reject) } removeWebhook(id: Webhook["webhookId"]) { return this.client.delete('/webhooks/' + id) .then(r => r.json()) .then(j => j.data || {}) - .catch(Promise.reject) } } \ No newline at end of file From 4c4ffc2bcaad3777f3db0b95faf464a7a6365c30 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 14:25:03 +0100 Subject: [PATCH 115/151] fix(ui): remove consolelog --- .../shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx index 6efbb506d..c6495227b 100644 --- a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx +++ b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx @@ -62,7 +62,6 @@ function FetchTabs({ resource }: Props) { const [requestHeaders, setRequestHeaders] = useState | null>(null); const [responseHeaders, setResponseHeaders] = useState | null>(null); - console.log(resource) useEffect(() => { const { request, response } = resource; parseRequestResponse( From 5bb9a8e73adc5063225ee39c4a0e56629c8ee5b5 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Wed, 22 Feb 2023 17:11:34 +0100 Subject: [PATCH 116/151] feat(chalice): fixed insights --- ee/api/chalicelib/core/sessions_insights.py | 32 +++++++++++---------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/ee/api/chalicelib/core/sessions_insights.py b/ee/api/chalicelib/core/sessions_insights.py index 9f05e9b11..532e47e19 100644 --- a/ee/api/chalicelib/core/sessions_insights.py +++ b/ee/api/chalicelib/core/sessions_insights.py @@ -173,7 +173,7 @@ def query_requests_by_period(project_id, start_time, end_time, filters: Optional if n == n_: data_['value'] = v[0] data_['oldValue'] = v[1] - data_['change'] = 100* v[2] + data_['change'] = 100 * v[2] data_['isNew'] = False break results.append(data_) @@ -252,12 +252,12 @@ def query_most_errors_by_period(project_id, start_time, end_time, for n in 
common_errors:
         if n is None:
             continue
-        old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), names_idx)
-        if old_errors == 0:
+        sum_old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), sessions_idx)
+        if sum_old_errors == 0:
             continue
-        new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), names_idx)
+        sum_new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
         # error_increase[n] = (new_errors - old_errors) / old_errors
-        error_values[n] = new_errors, old_errors, (new_errors - old_errors) / old_errors
+        error_values[n] = sum_new_errors, sum_old_errors, (sum_new_errors - sum_old_errors) / sum_old_errors
     ratio = sorted(percentage_errors.items(), key=lambda k: k[1], reverse=True)
     increase = sorted(error_values.items(), key=lambda k: k[1][-1], reverse=True)
     names_ = set([k[0] for k in increase[:3] + ratio[:3]] + new_errors[:3])
@@ -347,18 +347,20 @@ def query_cpu_memory_by_period(project_id, start_time, end_time,
     output = list()
     if cpu_oldvalue is not None or cpu_newvalue is not None:
         output.append({'category': schemas_ee.InsightCategories.resources,
-                       'name': 'cpu',
-                       'value': cpu_newvalue,
-                       'oldValue': cpu_oldvalue,
-                       'change': 100 * (cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
-                       'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
+                       'name': 'cpu',
+                       'value': cpu_newvalue,
+                       'oldValue': cpu_oldvalue,
+                       'change': 100 * (
+                               cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
+                       'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
     if mem_oldvalue is not None or mem_newvalue is not None:
         output.append({'category': schemas_ee.InsightCategories.resources,
-                       'name': 'memory',
-                       'value': mem_newvalue,
-                       'oldValue': mem_oldvalue,
-                       'change': 100 * (mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
-                       'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})
+                       'name': 'memory',
+                       'value': mem_newvalue,
+                       'oldValue': mem_oldvalue,
+                       'change': 100 * (
+                               mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
+                       'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})

     return output

From c0f2602c17beb2c8e4c68ea7bb6386a01830e3f8 Mon Sep 17 00:00:00 2001
From: nick-delirium
Date: Wed, 22 Feb 2023 10:15:42 +0100
Subject: [PATCH 117/151] change(ui): fix for network tabs?
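Previously parseRequestResponse threw "body is not a string" before ever
calling JSON.parse. Since JSON.parse coerces its argument to a string and
itself throws when the result is not valid JSON, the surrounding try/catch
already covers that case, so the guard (and the mutable `let` bindings) can
go. A minimal sketch of the resulting flow; the helper name and setter
signatures are assumptions for illustration, not the component's actual API:

    // Interpret a captured body as JSON when possible, else keep the raw text.
    const applyBody = (
      bd: string,
      setJSONBody: (b: object | null) => void,
      setStringBody: (s: string) => void,
    ): void => {
      try {
        const jBody = JSON.parse(bd);
        if (typeof jBody === 'object' && jBody !== null) {
          setJSONBody(jBody); // structured tree view
        } else {
          setStringBody(String(jBody)); // primitives render as plain text
        }
      } catch {
        setStringBody(bd); // not JSON at all: show the raw body
      }
    };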
---
 .../FetchDetailsModal/components/FetchTabs/FetchTabs.tsx | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx
index 63415ed26..6efbb506d 100644
--- a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx
+++ b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx
@@ -22,7 +22,7 @@ function parseRequestResponse(
     setStringBody('');
     return;
   }
-  let json = JSON.parse(r)
+  const json = JSON.parse(r)
   const hs = json.headers
   const bd = json.body as string
@@ -35,11 +35,8 @@ function parseRequestResponse(
     setJSONBody(null)
     setStringBody('')
   }
-  if (typeof bd !== 'string') {
-    throw new Error(`body is not a string`)
-  }
   try {
-    let jBody = JSON.parse(bd)
+    const jBody = JSON.parse(bd)
     if (typeof jBody === "object" && jBody != null) {
       setJSONBody(jBody)
     } else {

From 203f0131b43fc633ca6592c7f5e56aa52ccc7ff Mon Sep 17 00:00:00 2001
From: nick-delirium
Date: Wed, 22 Feb 2023 12:25:15 +0100
Subject: [PATCH 118/151] fix(ui): display error for webhooks

---
 .../components/Client/Webhooks/WebhookForm.js | 133 ++++++++++--------
 frontend/app/mstore/settingsStore.ts          |   6 ++++--
 frontend/app/services/WebhookService.ts       |   3 -
 3 files changed, 76 insertions(+), 66 deletions(-)

diff --git a/frontend/app/components/Client/Webhooks/WebhookForm.js b/frontend/app/components/Client/Webhooks/WebhookForm.js
index 62f009f1e..08799456f 100644
--- a/frontend/app/components/Client/Webhooks/WebhookForm.js
+++ b/frontend/app/components/Client/Webhooks/WebhookForm.js
@@ -1,75 +1,86 @@
 import React from 'react';
 import { Form, Button, Input } from 'UI';
 import styles from './webhookForm.module.css';
-import { useStore } from 'App/mstore'
-import { observer } from 'mobx-react-lite'
+import { useStore } from 'App/mstore';
+import { observer } from 'mobx-react-lite';
+import { toast } from 'react-toastify';

 function WebhookForm(props) {
-  const { settingsStore } = useStore()
-  const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore
-  const write = ({ target: { value, name } }) => editWebhook({ [name]: value });
+  const { settingsStore } = useStore();
+  const { webhookInst: webhook, hooksLoading: loading, saveWebhook, editWebhook } = settingsStore;
+  const write = ({ target: { value, name } }) => editWebhook({ [name]: value });

-  const save = () => {
-    saveWebhook(webhook).then(() => {
-      props.onClose();
-    });
-  };
+  const save = () => {
+    saveWebhook(webhook)
+      .then(() => {
+        props.onClose();
+      })
+      .catch((e) => {
+        const baseStr = 'Error saving webhook';
+        if (e.response) {
+          e.response.json().then(({ errors }) => {
+            toast.error(baseStr + ': ' + errors.join(','));
+          });
+        } else {
+          toast.error(baseStr);
+        }
+      });
+  };
+  return (
+

{webhook.exists() ? 'Update' : 'Add'} Webhook

+
+ + + + - return ( -
-

{webhook.exists() ? 'Update' : 'Add'} Webhook

- - - - - + + + + - - - - + + + + - - - - - -
-
- - {webhook.exists() && } -
- {webhook.exists() && - } -
- +
+
+ + {webhook.exists() && } +
+ {webhook.exists() && ( + + )}
- ); + +
+ ); } export default observer(WebhookForm); diff --git a/frontend/app/mstore/settingsStore.ts b/frontend/app/mstore/settingsStore.ts index 7dd584fd3..c31071694 100644 --- a/frontend/app/mstore/settingsStore.ts +++ b/frontend/app/mstore/settingsStore.ts @@ -6,7 +6,6 @@ import Webhook, { IWebhook } from 'Types/webhook'; import { webhookService } from 'App/services'; -import Alert, { IAlert } from "Types/alert"; export default class SettingsStore { loadingCaptureRate: boolean = false; @@ -73,8 +72,11 @@ export default class SettingsStore { this.webhookInst = new Webhook(data) if (inst.webhookId === undefined) this.setWebhooks([...this.webhooks, this.webhookInst]) else this.setWebhooks([...this.webhooks.filter(hook => hook.webhookId !== data.webhookId), this.webhookInst]) - this.hooksLoading = false + }) + .finally(() => { + this.hooksLoading = false + }) } setWebhooks = (webhooks: Webhook[]) => { diff --git a/frontend/app/services/WebhookService.ts b/frontend/app/services/WebhookService.ts index 2bcefa619..7b1073867 100644 --- a/frontend/app/services/WebhookService.ts +++ b/frontend/app/services/WebhookService.ts @@ -6,20 +6,17 @@ export default class WebhookService extends BaseService { return this.client.get('/webhooks') .then(r => r.json()) .then(j => j.data || []) - .catch(Promise.reject) } saveWebhook(inst: Webhook) { return this.client.put('/webhooks', inst) .then(r => r.json()) .then(j => j.data || {}) - .catch(Promise.reject) } removeWebhook(id: Webhook["webhookId"]) { return this.client.delete('/webhooks/' + id) .then(r => r.json()) .then(j => j.data || {}) - .catch(Promise.reject) } } \ No newline at end of file From 9459f4b679702ac4538d64dc80796ac0cbf83e9e Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 14:25:03 +0100 Subject: [PATCH 119/151] fix(ui): remove consolelog --- .../shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx index 6efbb506d..c6495227b 100644 --- a/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx +++ b/frontend/app/components/shared/FetchDetailsModal/components/FetchTabs/FetchTabs.tsx @@ -62,7 +62,6 @@ function FetchTabs({ resource }: Props) { const [requestHeaders, setRequestHeaders] = useState | null>(null); const [responseHeaders, setResponseHeaders] = useState | null>(null); - console.log(resource) useEffect(() => { const { request, response } = resource; parseRequestResponse( From b2f4795745c9518fe97e56475262b6ff851cbd83 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 17:18:17 +0100 Subject: [PATCH 120/151] fix(ui): fix form update --- frontend/app/mstore/alertsStore.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index a2d155ffc..245be0bcf 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -79,10 +79,10 @@ export default class AlertsStore { edit = (diff: Partial) => { const key = Object.keys(diff)[0] - const oldInst = { ...this.instance } + const oldInst = this.instance // @ts-ignore oldInst[key] = diff[key] - this.instance = new Alert(oldInst, !!oldInst.alertId) + this.instance = oldInst } } From 6e16aacb56bccf0fed30dc79aad32e0cab4faa1e Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 17:54:03 +0100 
Subject: [PATCH 121/151] fix(ui): fix alert unit change

---
 .../components/Alerts/AlertForm/Condition.tsx |  4 +-
 .../components/Alerts/AlertListItem.tsx       |  3 +-
 .../Dashboard/components/Alerts/NewAlert.tsx  |  5 ++
 frontend/app/mstore/alertsStore.ts            | 76 ++++++++++---------
 4 files changed, 50 insertions(+), 38 deletions(-)

diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx
index ba6956323..80a900895 100644
--- a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx
+++ b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx
@@ -26,6 +26,7 @@ interface ICondition {
   writeQuery: (data: any) => void;
   writeQueryOption: (e: any, data: any) => void;
   unit: any;
+  changeUnit: (value: string) => void;
 }

 function Condition({
@@ -36,6 +37,7 @@ function Condition({
   writeQueryOption,
   writeQuery,
   unit,
+  changeUnit,
 }: ICondition) {
   return (
diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index 071dd204c..8137b7750 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -8,6 +8,7 @@ import { DateTime } from 'luxon'; import { withRouter, RouteComponentProps } from 'react-router-dom'; import cn from 'classnames'; import Alert from 'Types/alert'; +import { observer } from 'mobx-react-lite' const getThreshold = (threshold: number) => { if (threshold === 15) return '15 Minutes'; @@ -165,4 +166,4 @@ function AlertListItem(props: Props) { ); } -export default withRouter(AlertListItem); +export default withRouter(observer(AlertListItem)); diff --git a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx index 67a6bb459..4d1d247b0 100644 --- a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx @@ -167,6 +167,10 @@ const NewAlert = (props: IProps) => { edit({ query: { ...query, [name]: value } }); }; + const changeUnit = (value: string) => { + alertsStore.changeUnit(value) + } + const writeQuery = ({ target: { value, name } }: React.ChangeEvent) => { const { query } = instance; edit({ query: { ...query, [name]: value } }); @@ -243,6 +247,7 @@ const NewAlert = (props: IProps) => { instance={instance} triggerOptions={triggerOptions} writeQueryOption={writeQueryOption} + changeUnit={changeUnit} writeQuery={writeQuery} unit={unit} /> diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index 245be0bcf..33665f861 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -1,14 +1,14 @@ -import { makeAutoObservable } from 'mobx' -import Alert, { IAlert } from 'Types/alert' -import { alertsService } from 'App/services' +import { makeAutoObservable, action } from 'mobx'; +import Alert, { IAlert } from 'Types/alert'; +import { alertsService } from 'App/services'; export default class AlertsStore { alerts: Alert[] = []; - triggerOptions: { label: string, value: string | number, unit?: string }[] = []; + triggerOptions: { label: string; value: string | number; unit?: string }[] = []; alertsSearch = ''; - // @ts-ignore + // @ts-ignore instance: Alert = new Alert({}, false); - loading = false + loading = false; page: number = 1; constructor() { @@ -18,71 +18,75 @@ export default class AlertsStore { changeSearch = (value: string) => { this.alertsSearch = value; this.page = 1; - } + }; // TODO: remove it updateKey(key: string, value: any) { // @ts-ignore - this[key] = value + this[key] = value; } fetchList = async () => { - this.loading = true + this.loading = true; try { const list = await alertsService.fetchList(); - this.alerts = list.map(alert => new Alert(alert, true)); + this.alerts = list.map((alert) => new Alert(alert, true)); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; save = async (inst: Alert) => { - this.loading = true + this.loading = true; try { - await alertsService.save(inst ? inst : this.instance) - this.instance.isExists = true + await alertsService.save(inst ? 
inst : this.instance); + this.instance.isExists = true; } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; remove = async (id: string) => { - this.loading = true + this.loading = true; try { - await alertsService.remove(id) + await alertsService.remove(id); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; fetchTriggerOptions = async () => { - this.loading = true + this.loading = true; try { const options = await alertsService.fetchTriggerOptions(); - this.triggerOptions = options.map(({ name, value }) => ({ label: name, value })) + this.triggerOptions = options.map(({ name, value }) => ({ label: name, value })); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; init = (inst: Partial | Alert) => { - this.instance = inst instanceof Alert ? inst : new Alert(inst, false) - } + this.instance = inst instanceof Alert ? inst : new Alert(inst, false); + }; edit = (diff: Partial) => { - const key = Object.keys(diff)[0] - const oldInst = this.instance + const key = Object.keys(diff)[0]; + const oldInst = this.instance; // @ts-ignore - oldInst[key] = diff[key] + oldInst[key] = diff[key]; - this.instance = oldInst - } + this.instance = oldInst; + }; + + changeUnit = ({ value }: { value: string }) => { + this.instance.change = value; + }; } From 791ccaa82b8496ef8a2f805225df7ff5e7072aac Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Thu, 23 Feb 2023 11:03:47 +0100 Subject: [PATCH 122/151] fix(ui) - tooltip text --- .../Dashboard/components/DashboardOptions/DashboardOptions.tsx | 2 +- frontend/app/components/ui/ItemMenu/ItemMenu.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx index b006dbb22..63b1c3f35 100644 --- a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx +++ b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx @@ -17,7 +17,7 @@ function DashboardOptions(props: Props) { { icon: 'text-paragraph', text: `${!isTitlePresent ? 'Add' : 'Edit'} Description`, onClick: () => editHandler(false) }, { icon: 'users', text: 'Visibility & Access', onClick: editHandler }, { icon: 'trash', text: 'Delete', onClick: deleteHandler }, - { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: {ENTERPRISE_REQUEIRED} } + { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: ENTERPRISE_REQUEIRED } ] return ( diff --git a/frontend/app/components/ui/ItemMenu/ItemMenu.tsx b/frontend/app/components/ui/ItemMenu/ItemMenu.tsx index fcb7e6467..bd8ecee28 100644 --- a/frontend/app/components/ui/ItemMenu/ItemMenu.tsx +++ b/frontend/app/components/ui/ItemMenu/ItemMenu.tsx @@ -68,7 +68,7 @@ export default class ItemMenu extends React.PureComponent { {items .filter(({ hidden }) => !hidden) .map(({ onClick, text, icon, disabled = false, tooltipTitle = '' }) => ( - +
{}} From 69c2f3f291ce38530ca7a2eb7017a407993321dd Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Thu, 23 Feb 2023 11:55:56 +0100 Subject: [PATCH 123/151] change(tracker): 5.0.0 release --- tracker/tracker/CHANGELOG.md | 3 ++- tracker/tracker/package.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tracker/tracker/CHANGELOG.md b/tracker/tracker/CHANGELOG.md index 9d12152f6..559e4e865 100644 --- a/tracker/tracker/CHANGELOG.md +++ b/tracker/tracker/CHANGELOG.md @@ -1,10 +1,11 @@ -## 4.1.10 +## 5.0.0 - Added "tel" to supported input types - Added `{ withCurrentTime: true }` to `tracker.getSessionURL` method which will return sessionURL with current session's timestamp - Added Network module that captures fetch/xhr by default (with no plugin required) - Use `timeOrigin()` instead of `performance.timing.navigationStart` in ResourceTiming messages - Added app restart when service worker died after inactivity (mobile safari) +- **[breaking]** string dictionary to reduce session size ## 4.1.8 diff --git a/tracker/tracker/package.json b/tracker/tracker/package.json index 5682ffc1c..c45c15e4a 100644 --- a/tracker/tracker/package.json +++ b/tracker/tracker/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker", "description": "The OpenReplay tracker main package", - "version": "4.1.10", + "version": "5.0.0", "keywords": [ "logging", "replay" From 9f19800fa1792c3d09060cd28a768261a8cbf50b Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Thu, 23 Feb 2023 11:58:16 +0100 Subject: [PATCH 124/151] change(tracker): assist 5.0.0 --- tracker/tracker-assist/CHANGELOG.md | 2 +- tracker/tracker-assist/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tracker/tracker-assist/CHANGELOG.md b/tracker/tracker-assist/CHANGELOG.md index 7ce338e9b..f61aad123 100644 --- a/tracker/tracker-assist/CHANGELOG.md +++ b/tracker/tracker-assist/CHANGELOG.md @@ -1,4 +1,4 @@ -## 4.1.6 +## 5.0.0 - fix recording state import diff --git a/tracker/tracker-assist/package.json b/tracker/tracker-assist/package.json index 8b9dcd8ef..aaa80429d 100644 --- a/tracker/tracker-assist/package.json +++ b/tracker/tracker-assist/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-assist", "description": "Tracker plugin for screen assistance through the WebRTC", - "version": "4.1.6", + "version": "5.0.0", "keywords": [ "WebRTC", "assistance", From fc4c7704da592f39a2caf6e8b3f39afc74c1d5d7 Mon Sep 17 00:00:00 2001 From: Mehdi Osman Date: Thu, 23 Feb 2023 08:18:31 -0500 Subject: [PATCH 125/151] Updated tracker minVersion --- frontend/.env.sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/.env.sample b/frontend/.env.sample index 88f79bdb6..4b6cface2 100644 --- a/frontend/.env.sample +++ b/frontend/.env.sample @@ -23,4 +23,4 @@ MINIO_SECRET_KEY = '' # APP and TRACKER VERSIONS VERSION = '1.10.0' -TRACKER_VERSION = '4.1.10' +TRACKER_VERSION = '5.0.0' From a7062ad00baae7da42aa8665e69082b949f51657 Mon Sep 17 00:00:00 2001 From: Mehdi Osman Date: Thu, 23 Feb 2023 08:18:54 -0500 Subject: [PATCH 126/151] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 363c64d1c..05608a3c1 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket. 
- **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more. -- **Low footprint**. With a ~18KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. +- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. - **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data. - **Privacy controls**. Fine-grained security features for sanitizing user data. - **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean). From 15eb5d53a12156c57fd9ce5daabb6f24fae3b6a8 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Thu, 23 Feb 2023 14:20:50 +0100 Subject: [PATCH 127/151] feat(backend): upgraded /x/text library --- backend/go.mod | 2 +- backend/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index 161513ed8..9633f2b18 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -62,7 +62,7 @@ require ( golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect golang.org/x/sys v0.1.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect diff --git a/backend/go.sum b/backend/go.sum index de6d507d3..676cf479b 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -715,8 +715,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 3bfa7573e91b1495e6657857079268cd3713f339 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 23 Feb 2023 16:01:47 +0100 Subject: [PATCH 128/151] fix(helm): fix chalice pg hardcoded port Signed-off-by: rjshrjndrn --- .../openreplay/charts/chalice/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml index 586b43293..29d311a25 100644 --- a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml @@ 
-66,7 +66,7 @@ spec: - name: pg_host value: '{{ .Values.global.postgresql.postgresqlHost }}' - name: pg_port - value: "5432" + value: '{{ .Values.global.postgresql.postgresqlPort }}' - name: pg_dbname value: "{{ .Values.global.postgresql.postgresqlDatabase }}" - name: pg_user From 87d842ba43439930731a5f692211e88529f5333d Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Thu, 23 Feb 2023 17:08:11 +0100 Subject: [PATCH 129/151] feat(chalice): cleaned code --- api/chalicelib/core/users.py | 8 -------- ee/api/chalicelib/core/users.py | 13 ------------- 2 files changed, 21 deletions(-) diff --git a/api/chalicelib/core/users.py b/api/chalicelib/core/users.py index e5ae6e72b..c4933f92c 100644 --- a/api/chalicelib/core/users.py +++ b/api/chalicelib/core/users.py @@ -514,14 +514,6 @@ def set_password_invitation(user_id, new_password): } -def count_members(): - with pg_client.PostgresClient() as cur: - cur.execute("""SELECT COUNT(user_id) - FROM public.users WHERE deleted_at IS NULL;""") - r = cur.fetchone() - return r["count"] - - def email_exists(email): with pg_client.PostgresClient() as cur: cur.execute( diff --git a/ee/api/chalicelib/core/users.py b/ee/api/chalicelib/core/users.py index d2b13535a..ff357113f 100644 --- a/ee/api/chalicelib/core/users.py +++ b/ee/api/chalicelib/core/users.py @@ -591,19 +591,6 @@ def set_password_invitation(tenant_id, user_id, new_password): } -def count_members(tenant_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - """SELECT - COUNT(user_id) - FROM public.users WHERE tenant_id = %(tenant_id)s AND deleted_at IS NULL;""", - {"tenant_id": tenant_id}) - ) - r = cur.fetchone() - return r["count"] - - def email_exists(email): with pg_client.PostgresClient() as cur: cur.execute( From 4e8cb33727f7e9eace9fdffb3f3e0f15e66c0fa5 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 17:18:17 +0100 Subject: [PATCH 130/151] fix(ui): fix form update --- frontend/app/mstore/alertsStore.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index a2d155ffc..245be0bcf 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -79,10 +79,10 @@ export default class AlertsStore { edit = (diff: Partial) => { const key = Object.keys(diff)[0] - const oldInst = { ...this.instance } + const oldInst = this.instance // @ts-ignore oldInst[key] = diff[key] - this.instance = new Alert(oldInst, !!oldInst.alertId) + this.instance = oldInst } } From 5693896a99c8820ce1a8968aefff6bc949e50ed8 Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Wed, 22 Feb 2023 17:54:03 +0100 Subject: [PATCH 131/151] fix(ui): fix alert unit change --- .../components/Alerts/AlertForm/Condition.tsx | 4 +- .../components/Alerts/AlertListItem.tsx | 3 +- .../Dashboard/components/Alerts/NewAlert.tsx | 5 ++ frontend/app/mstore/alertsStore.ts | 76 ++++++++++--------- 4 files changed, 50 insertions(+), 38 deletions(-) diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx index ba6956323..80a900895 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertForm/Condition.tsx @@ -26,6 +26,7 @@ interface ICondition { writeQuery: (data: any) => void; writeQueryOption: (e: any, data: any) => void; unit: any; + changeUnit: (value: string) => void; } function 
Condition({ @@ -36,6 +37,7 @@ function Condition({ writeQueryOption, writeQuery, unit, + changeUnit, }: ICondition) { return (
@@ -48,7 +50,7 @@ function Condition({ options={changeOptions} name="change" defaultValue={instance.change} - onChange={({ value }) => writeOption(null, { name: 'change', value })} + onChange={({ value }) => changeUnit(value)} id="change-dropdown" />
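// A minimal sketch of the unit-change wiring this patch introduces: the
// dropdown above hands changeUnit() a plain string, NewAlert forwards it to
// the MobX store, and the store mutates the observable Alert instance in
// place. The plain-string action signature below is an assumption inferred
// from these call sites (illustrative sketch, not quoted verbatim from the
// diff):
//
//   // alertsStore.ts (sketch)
//   changeUnit = (value: string) => {
//     this.instance.change = value; // triggers observers of the alert form
//   };
//
//   // NewAlert.tsx (sketch)
//   const changeUnit = (value: string) => alertsStore.changeUnit(value);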
diff --git a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx index 071dd204c..8137b7750 100644 --- a/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/AlertListItem.tsx @@ -8,6 +8,7 @@ import { DateTime } from 'luxon'; import { withRouter, RouteComponentProps } from 'react-router-dom'; import cn from 'classnames'; import Alert from 'Types/alert'; +import { observer } from 'mobx-react-lite' const getThreshold = (threshold: number) => { if (threshold === 15) return '15 Minutes'; @@ -165,4 +166,4 @@ function AlertListItem(props: Props) { ); } -export default withRouter(AlertListItem); +export default withRouter(observer(AlertListItem)); diff --git a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx index 67a6bb459..4d1d247b0 100644 --- a/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx +++ b/frontend/app/components/Dashboard/components/Alerts/NewAlert.tsx @@ -167,6 +167,10 @@ const NewAlert = (props: IProps) => { edit({ query: { ...query, [name]: value } }); }; + const changeUnit = (value: string) => { + alertsStore.changeUnit(value) + } + const writeQuery = ({ target: { value, name } }: React.ChangeEvent) => { const { query } = instance; edit({ query: { ...query, [name]: value } }); @@ -243,6 +247,7 @@ const NewAlert = (props: IProps) => { instance={instance} triggerOptions={triggerOptions} writeQueryOption={writeQueryOption} + changeUnit={changeUnit} writeQuery={writeQuery} unit={unit} /> diff --git a/frontend/app/mstore/alertsStore.ts b/frontend/app/mstore/alertsStore.ts index 245be0bcf..33665f861 100644 --- a/frontend/app/mstore/alertsStore.ts +++ b/frontend/app/mstore/alertsStore.ts @@ -1,14 +1,14 @@ -import { makeAutoObservable } from 'mobx' -import Alert, { IAlert } from 'Types/alert' -import { alertsService } from 'App/services' +import { makeAutoObservable, action } from 'mobx'; +import Alert, { IAlert } from 'Types/alert'; +import { alertsService } from 'App/services'; export default class AlertsStore { alerts: Alert[] = []; - triggerOptions: { label: string, value: string | number, unit?: string }[] = []; + triggerOptions: { label: string; value: string | number; unit?: string }[] = []; alertsSearch = ''; - // @ts-ignore + // @ts-ignore instance: Alert = new Alert({}, false); - loading = false + loading = false; page: number = 1; constructor() { @@ -18,71 +18,75 @@ export default class AlertsStore { changeSearch = (value: string) => { this.alertsSearch = value; this.page = 1; - } + }; // TODO: remove it updateKey(key: string, value: any) { // @ts-ignore - this[key] = value + this[key] = value; } fetchList = async () => { - this.loading = true + this.loading = true; try { const list = await alertsService.fetchList(); - this.alerts = list.map(alert => new Alert(alert, true)); + this.alerts = list.map((alert) => new Alert(alert, true)); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; save = async (inst: Alert) => { - this.loading = true + this.loading = true; try { - await alertsService.save(inst ? inst : this.instance) - this.instance.isExists = true + await alertsService.save(inst ? 
inst : this.instance); + this.instance.isExists = true; } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; remove = async (id: string) => { - this.loading = true + this.loading = true; try { - await alertsService.remove(id) + await alertsService.remove(id); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; fetchTriggerOptions = async () => { - this.loading = true + this.loading = true; try { const options = await alertsService.fetchTriggerOptions(); - this.triggerOptions = options.map(({ name, value }) => ({ label: name, value })) + this.triggerOptions = options.map(({ name, value }) => ({ label: name, value })); } catch (e) { - console.error(e) + console.error(e); } finally { - this.loading = false + this.loading = false; } - } + }; init = (inst: Partial | Alert) => { - this.instance = inst instanceof Alert ? inst : new Alert(inst, false) - } + this.instance = inst instanceof Alert ? inst : new Alert(inst, false); + }; edit = (diff: Partial) => { - const key = Object.keys(diff)[0] - const oldInst = this.instance + const key = Object.keys(diff)[0]; + const oldInst = this.instance; // @ts-ignore - oldInst[key] = diff[key] + oldInst[key] = diff[key]; - this.instance = oldInst - } + this.instance = oldInst; + }; + + changeUnit = ({ value }: { value: string }) => { + this.instance.change = value; + }; } From 728917f0d6c2bb590c5afab694046778e853dfa6 Mon Sep 17 00:00:00 2001 From: Shekar Siri Date: Thu, 23 Feb 2023 11:03:47 +0100 Subject: [PATCH 132/151] fix(ui) - tooltip text --- .../Dashboard/components/DashboardOptions/DashboardOptions.tsx | 2 +- frontend/app/components/ui/ItemMenu/ItemMenu.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx index b006dbb22..63b1c3f35 100644 --- a/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx +++ b/frontend/app/components/Dashboard/components/DashboardOptions/DashboardOptions.tsx @@ -17,7 +17,7 @@ function DashboardOptions(props: Props) { { icon: 'text-paragraph', text: `${!isTitlePresent ? 'Add' : 'Edit'} Description`, onClick: () => editHandler(false) }, { icon: 'users', text: 'Visibility & Access', onClick: editHandler }, { icon: 'trash', text: 'Delete', onClick: deleteHandler }, - { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: {ENTERPRISE_REQUEIRED} } + { icon: 'pdf-download', text: 'Download Report', onClick: renderReport, disabled: !isEnterprise, tooltipTitle: ENTERPRISE_REQUEIRED } ] return ( diff --git a/frontend/app/components/ui/ItemMenu/ItemMenu.tsx b/frontend/app/components/ui/ItemMenu/ItemMenu.tsx index fcb7e6467..bd8ecee28 100644 --- a/frontend/app/components/ui/ItemMenu/ItemMenu.tsx +++ b/frontend/app/components/ui/ItemMenu/ItemMenu.tsx @@ -68,7 +68,7 @@ export default class ItemMenu extends React.PureComponent { {items .filter(({ hidden }) => !hidden) .map(({ onClick, text, icon, disabled = false, tooltipTitle = '' }) => ( - +
{}} From db482a8ddd462a66efebce714f0614182e5fabee Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Thu, 23 Feb 2023 11:55:56 +0100 Subject: [PATCH 133/151] change(tracker): 5.0.0 release --- tracker/tracker/CHANGELOG.md | 3 ++- tracker/tracker/package.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tracker/tracker/CHANGELOG.md b/tracker/tracker/CHANGELOG.md index 9d12152f6..559e4e865 100644 --- a/tracker/tracker/CHANGELOG.md +++ b/tracker/tracker/CHANGELOG.md @@ -1,10 +1,11 @@ -## 4.1.10 +## 5.0.0 - Added "tel" to supported input types - Added `{ withCurrentTime: true }` to `tracker.getSessionURL` method which will return sessionURL with current session's timestamp - Added Network module that captures fetch/xhr by default (with no plugin required) - Use `timeOrigin()` instead of `performance.timing.navigationStart` in ResourceTiming messages - Added app restart when service worker died after inactivity (mobile safari) +- **[breaking]** string dictionary to reduce session size ## 4.1.8 diff --git a/tracker/tracker/package.json b/tracker/tracker/package.json index 5682ffc1c..c45c15e4a 100644 --- a/tracker/tracker/package.json +++ b/tracker/tracker/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker", "description": "The OpenReplay tracker main package", - "version": "4.1.10", + "version": "5.0.0", "keywords": [ "logging", "replay" From f3e0293d7775467108f6e3d401720eee8254153f Mon Sep 17 00:00:00 2001 From: nick-delirium Date: Thu, 23 Feb 2023 11:58:16 +0100 Subject: [PATCH 134/151] change(tracker): assist 5.0.0 --- tracker/tracker-assist/CHANGELOG.md | 2 +- tracker/tracker-assist/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tracker/tracker-assist/CHANGELOG.md b/tracker/tracker-assist/CHANGELOG.md index 7ce338e9b..f61aad123 100644 --- a/tracker/tracker-assist/CHANGELOG.md +++ b/tracker/tracker-assist/CHANGELOG.md @@ -1,4 +1,4 @@ -## 4.1.6 +## 5.0.0 - fix recording state import diff --git a/tracker/tracker-assist/package.json b/tracker/tracker-assist/package.json index 8b9dcd8ef..aaa80429d 100644 --- a/tracker/tracker-assist/package.json +++ b/tracker/tracker-assist/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-assist", "description": "Tracker plugin for screen assistance through the WebRTC", - "version": "4.1.6", + "version": "5.0.0", "keywords": [ "WebRTC", "assistance", From dbc644826986c2df95ef1939bba29d2efc8e8ff9 Mon Sep 17 00:00:00 2001 From: Mehdi Osman Date: Thu, 23 Feb 2023 08:18:31 -0500 Subject: [PATCH 135/151] Updated tracker minVersion --- frontend/.env.sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/.env.sample b/frontend/.env.sample index 88f79bdb6..4b6cface2 100644 --- a/frontend/.env.sample +++ b/frontend/.env.sample @@ -23,4 +23,4 @@ MINIO_SECRET_KEY = '' # APP and TRACKER VERSIONS VERSION = '1.10.0' -TRACKER_VERSION = '4.1.10' +TRACKER_VERSION = '5.0.0' From dc3a5bd875ee0f2f7344b5d3aa34c2be00dbb865 Mon Sep 17 00:00:00 2001 From: Mehdi Osman Date: Thu, 23 Feb 2023 08:18:54 -0500 Subject: [PATCH 136/151] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 363c64d1c..05608a3c1 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket. 
- **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more. -- **Low footprint**. With a ~18KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. +- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance. - **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data. - **Privacy controls**. Fine-grained security features for sanitizing user data. - **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean). From 6af59e64481421e1b47b1dc960f9140ac6784745 Mon Sep 17 00:00:00 2001 From: Alexander Zavorotynskiy Date: Thu, 23 Feb 2023 14:20:50 +0100 Subject: [PATCH 137/151] feat(backend): upgraded /x/text library --- backend/go.mod | 2 +- backend/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index 161513ed8..9633f2b18 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -62,7 +62,7 @@ require ( golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect golang.org/x/sys v0.1.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect diff --git a/backend/go.sum b/backend/go.sum index de6d507d3..676cf479b 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -715,8 +715,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From c9789ed99ab6312d1f0c29e8ed67db7db1f37790 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 23 Feb 2023 16:01:47 +0100 Subject: [PATCH 138/151] fix(helm): fix chalice pg hardcoded port Signed-off-by: rjshrjndrn --- .../openreplay/charts/chalice/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml index 586b43293..29d311a25 100644 --- a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml @@ 
-66,7 +66,7 @@ spec: - name: pg_host value: '{{ .Values.global.postgresql.postgresqlHost }}' - name: pg_port - value: "5432" + value: '{{ .Values.global.postgresql.postgresqlPort }}' - name: pg_dbname value: "{{ .Values.global.postgresql.postgresqlDatabase }}" - name: pg_user From 3f42f7b9e782113f80da393559a32d1adcdb0021 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 23 Feb 2023 18:08:09 +0100 Subject: [PATCH 139/151] chore(helm): Adding support for global env variables --- .../charts/alerts/templates/deployment.yaml | 4 +++ .../charts/assets/templates/deployment.yaml | 4 +++ .../charts/assist/templates/deployment.yaml | 4 +++ .../charts/chalice/templates/deployment.yaml | 4 +++ .../charts/db/templates/deployment.yaml | 4 +++ .../charts/ender/templates/deployment.yaml | 4 +++ .../charts/frontend/templates/deployment.yaml | 4 +++ .../heuristics/templates/deployment.yaml | 4 +++ .../charts/http/templates/deployment.yaml | 4 +++ .../integrations/templates/deployment.yaml | 4 +++ .../charts/peers/templates/deployment.yaml | 4 +++ .../charts/quickwit/templates/deployment.yaml | 4 +++ .../charts/sink/templates/deployment.yaml | 4 +++ .../sourcemapreader/templates/deployment.yaml | 4 +++ .../charts/storage/templates/deployment.yaml | 4 +++ .../helmcharts/openreplay/templates/job.yaml | 28 +++++++++++++++++++ scripts/helmcharts/openreplay/values.yaml | 2 ++ 17 files changed, 90 insertions(+) diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml index 4afed4367..d4c1d6e49 100644 --- a/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml @@ -116,6 +116,10 @@ spec: value: '{{ .Values.global.email.emailSslCert }}' - name: EMAIL_FROM value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml index f66479475..f959adc13 100644 --- a/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml @@ -94,6 +94,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml index e153e50c3..92ae9a93c 100644 --- a/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml @@ -75,6 +75,10 @@ spec: {{- end }} - name: REDIS_URL value: {{ .Values.global.redis.redisHost }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml 
b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml index 29d311a25..a15553a8a 100644 --- a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml @@ -134,6 +134,10 @@ spec: value: '{{ .Values.global.email.emailSslCert }}' - name: EMAIL_FROM value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml index 63182fbac..90e971c8d 100644 --- a/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml @@ -69,6 +69,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml index e5b0a946b..fec4a808d 100644 --- a/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml @@ -61,6 +61,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml index e5eb29441..f685b76bc 100644 --- a/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml @@ -101,6 +101,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml index 6d88fec7a..f545ff77f 100644 --- a/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml @@ -50,6 +50,10 @@ spec: - name: KAFKA_USE_SSL value: '{{ .Values.global.kafka.kafkaUseSsl }}' {{- include "openreplay.env.redis_string" 
.Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml index 9f7d407bb..1add28054 100644 --- a/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml @@ -101,6 +101,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml index 0f9ead73c..522316d81 100644 --- a/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml @@ -61,6 +61,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml index 2cbd395d9..98c290708 100644 --- a/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml @@ -54,6 +54,10 @@ spec: {{- else }} value: {{ .Values.global.s3.accessKey }} {{- end }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml index 3ac58c215..34c9ddd73 100644 --- a/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml @@ -57,6 +57,10 @@ spec: value: {{ .Values.global.s3.secretKey }} - name: QW_DATA_DIR value: /opt/openreplay/ + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} ports: {{- range $key, $val := .Values.service.ports }} - name: {{ $key }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml index 7381541a1..88bd89c1f 100644 --- a/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml @@ -70,6 +70,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include 
"openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml index 7abca821c..1d8041c5b 100644 --- a/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml @@ -79,6 +79,10 @@ spec: # S3 compatible storage value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml index 9cb2cca22..aff40a227 100644 --- a/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml @@ -78,6 +78,10 @@ spec: - name: KAFKA_USE_SSL value: '{{ .Values.global.kafka.kafkaUseSsl }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/templates/job.yaml b/scripts/helmcharts/openreplay/templates/job.yaml index 095232a7d..3e0494d7f 100644 --- a/scripts/helmcharts/openreplay/templates/job.yaml +++ b/scripts/helmcharts/openreplay/templates/job.yaml @@ -35,6 +35,10 @@ spec: - name: git image: alpine/git env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: ENTERPRISE_EDITION_LICENSE value: "{{ .Values.global.enterpriseEditionLicense }}" command: @@ -107,6 +111,10 @@ spec: {{- else }} value: '{{ .Values.global.postgresql.postgresqlPassword }}' {{- end}} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} image: bitnami/postgresql:13.3.0-debian-10-r53 command: - /bin/bash @@ -122,6 +130,10 @@ spec: - name: minio image: bitnami/minio:2020.10.9-debian-10-r6 env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: UPGRADE_FRONTENT @@ -152,6 +164,10 @@ spec: {{- if .Values.vault.enabled }} - name: vault env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: PGHOST @@ -177,6 +193,10 @@ spec: mountPath: /opt/migrations/ - name: vault-s3-upload env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: AWS_ACCESS_KEY_ID value: "{{ .Values.global.s3.accessKey }}" - name: AWS_SECRET_ACCESS_KEY @@ -221,6 +241,10 @@ spec: - name: clickhouse image: clickhouse/clickhouse-server:22.12-alpine env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: 
PREVIOUS_APP_VERSION @@ -248,6 +272,10 @@ spec: - name: kafka image: bitnami/kafka:2.6.0-debian-10-r30 env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: RETENTION_TIME value: "{{ .Values.global.kafka.retentionTime }}" - name: KAFKA_HOST diff --git a/scripts/helmcharts/openreplay/values.yaml b/scripts/helmcharts/openreplay/values.yaml index f601d22da..694585180 100644 --- a/scripts/helmcharts/openreplay/values.yaml +++ b/scripts/helmcharts/openreplay/values.yaml @@ -37,3 +37,5 @@ global: vault: *vault redis: *redis clusterDomain: "svc.cluster.local" + # In case you've http proxy to access internet. + env: {} From 13b4fc1c12bf510d0d8cb67953066734e4514a59 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 24 Feb 2023 13:50:44 +0100 Subject: [PATCH 140/151] feat(DB): backup data before upgrade --- .../db/init_dbs/postgresql/1.10.0/1.10.0.sql | 145 ++++++++++++++++- .../db/init_dbs/postgresql/1.10.0/1.10.0.sql | 147 +++++++++++++++++- 2 files changed, 289 insertions(+), 3 deletions(-) diff --git a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index d31b3cec7..6461a1214 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -5,6 +5,148 @@ $$ SELECT 'v1.10.0-ee' $$ LANGUAGE sql IMMUTABLE; +-- Backup dashboard & search data: +DO +$$ + BEGIN + IF NOT (SELECT EXISTS(SELECT schema_name + FROM information_schema.schemata + WHERE schema_name = 'backup_v1_10_0')) THEN + CREATE SCHEMA backup_v1_10_0; + CREATE TABLE backup_v1_10_0.dashboards + ( + dashboard_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + description text NOT NULL DEFAULT '', + is_public boolean NOT NULL DEFAULT TRUE, + is_pinned boolean NOT NULL DEFAULT FALSE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp NULL DEFAULT NULL + ); + CREATE TABLE backup_v1_10_0.dashboard_widgets + ( + widget_id integer, + dashboard_id integer, + metric_id integer, + user_id integer, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + config jsonb NOT NULL DEFAULT '{}'::jsonb + ); + CREATE TABLE backup_v1_10_0.searches + ( + search_id integer, + project_id integer, + user_id integer, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + CREATE TABLE backup_v1_10_0.metrics + ( + metric_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + active boolean NOT NULL DEFAULT TRUE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp, + edited_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + metric_type text NOT NULL, + view_type text NOT NULL, + metric_of text NOT NULL DEFAULT 'sessionCount', + metric_value text[] NOT NULL DEFAULT '{}'::text[], + metric_format text, + category text NULL DEFAULT 'custom', + is_pinned boolean NOT NULL DEFAULT FALSE, + is_predefined boolean NOT NULL DEFAULT FALSE, + is_template boolean NOT NULL DEFAULT FALSE, + predefined_key text NULL DEFAULT NULL, + default_config jsonb NOT NULL + ); + CREATE TABLE backup_v1_10_0.metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE 
CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + + INSERT INTO backup_v1_10_0.dashboards(dashboard_id, project_id, user_id, name, description, is_public, + is_pinned, + created_at, deleted_at) + SELECT dashboard_id, + project_id, + user_id, + name, + description, + is_public, + is_pinned, + created_at, + deleted_at + FROM public.dashboards + ORDER BY dashboard_id; + + INSERT INTO backup_v1_10_0.metrics(metric_id, project_id, user_id, name, is_public, active, created_at, + deleted_at, edited_at, metric_type, view_type, metric_of, metric_value, + metric_format, category, is_pinned, is_predefined, is_template, + predefined_key, default_config) + SELECT metric_id, + project_id, + user_id, + name, + is_public, + active, + created_at, + deleted_at, + edited_at, + metric_type, + view_type, + metric_of, + metric_value, + metric_format, + category, + is_pinned, + is_predefined, + is_template, + predefined_key, + default_config + FROM public.metrics + ORDER BY metric_id; + + INSERT INTO backup_v1_10_0.metric_series(series_id, metric_id, index, name, filter, created_at, deleted_at) + SELECT series_id, metric_id, index, name, filter, created_at, deleted_at + FROM public.metric_series + ORDER BY series_id; + + INSERT INTO backup_v1_10_0.dashboard_widgets(widget_id, dashboard_id, metric_id, user_id, created_at, config) + SELECT widget_id, dashboard_id, metric_id, user_id, created_at, config + FROM public.dashboard_widgets + ORDER BY widget_id; + + INSERT INTO backup_v1_10_0.searches(search_id, project_id, user_id, name, filter, created_at, deleted_at, + is_public) + SELECT search_id, + project_id, + user_id, + name, + filter, + created_at, + deleted_at, + is_public + FROM public.searches + ORDER BY search_id; + END IF; + END +$$ LANGUAGE plpgsql; + CREATE TABLE IF NOT EXISTS frontend_signals ( project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -119,7 +261,8 @@ $$ and column_name = 'is_predefined') THEN -- 0. change metric_of UPDATE metrics - SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''),left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1)) + SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''), + left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1)) WHERE not is_predefined; -- 1. 
pre transform structure diff --git a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql index 23ac42d37..92c0964bb 100644 --- a/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql +++ b/scripts/schema/db/init_dbs/postgresql/1.10.0/1.10.0.sql @@ -5,6 +5,148 @@ $$ SELECT 'v1.10.0' $$ LANGUAGE sql IMMUTABLE; +-- Backup dashboard & search data: +DO +$$ + BEGIN + IF NOT (SELECT EXISTS(SELECT schema_name + FROM information_schema.schemata + WHERE schema_name = 'backup_v1_10_0')) THEN + CREATE SCHEMA backup_v1_10_0; + CREATE TABLE backup_v1_10_0.dashboards + ( + dashboard_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + description text NOT NULL DEFAULT '', + is_public boolean NOT NULL DEFAULT TRUE, + is_pinned boolean NOT NULL DEFAULT FALSE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp NULL DEFAULT NULL + ); + CREATE TABLE backup_v1_10_0.dashboard_widgets + ( + widget_id integer, + dashboard_id integer, + metric_id integer, + user_id integer, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + config jsonb NOT NULL DEFAULT '{}'::jsonb + ); + CREATE TABLE backup_v1_10_0.searches + ( + search_id integer, + project_id integer, + user_id integer, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + CREATE TABLE backup_v1_10_0.metrics + ( + metric_id integer, + project_id integer, + user_id integer, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + active boolean NOT NULL DEFAULT TRUE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp, + edited_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + metric_type text NOT NULL, + view_type text NOT NULL, + metric_of text NOT NULL DEFAULT 'sessionCount', + metric_value text[] NOT NULL DEFAULT '{}'::text[], + metric_format text, + category text NULL DEFAULT 'custom', + is_pinned boolean NOT NULL DEFAULT FALSE, + is_predefined boolean NOT NULL DEFAULT FALSE, + is_template boolean NOT NULL DEFAULT FALSE, + predefined_key text NULL DEFAULT NULL, + default_config jsonb NOT NULL + ); + CREATE TABLE backup_v1_10_0.metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + + INSERT INTO backup_v1_10_0.dashboards(dashboard_id, project_id, user_id, name, description, is_public, + is_pinned, + created_at, deleted_at) + SELECT dashboard_id, + project_id, + user_id, + name, + description, + is_public, + is_pinned, + created_at, + deleted_at + FROM public.dashboards + ORDER BY dashboard_id; + + INSERT INTO backup_v1_10_0.metrics(metric_id, project_id, user_id, name, is_public, active, created_at, + deleted_at, edited_at, metric_type, view_type, metric_of, metric_value, + metric_format, category, is_pinned, is_predefined, is_template, + predefined_key, default_config) + SELECT metric_id, + project_id, + user_id, + name, + is_public, + active, + created_at, + deleted_at, + edited_at, + metric_type, + view_type, + metric_of, + metric_value, + metric_format, + category, + is_pinned, + is_predefined, + is_template, + predefined_key, + default_config + 
FROM public.metrics + ORDER BY metric_id; + + INSERT INTO backup_v1_10_0.metric_series(series_id, metric_id, index, name, filter, created_at, deleted_at) + SELECT series_id, metric_id, index, name, filter, created_at, deleted_at + FROM public.metric_series + ORDER BY series_id; + + INSERT INTO backup_v1_10_0.dashboard_widgets(widget_id, dashboard_id, metric_id, user_id, created_at, config) + SELECT widget_id, dashboard_id, metric_id, user_id, created_at, config + FROM public.dashboard_widgets + ORDER BY widget_id; + + INSERT INTO backup_v1_10_0.searches(search_id, project_id, user_id, name, filter, created_at, deleted_at, + is_public) + SELECT search_id, + project_id, + user_id, + name, + filter, + created_at, + deleted_at, + is_public + FROM public.searches + ORDER BY search_id; + END IF; + END +$$ LANGUAGE plpgsql; + ALTER TYPE webhook_type ADD VALUE IF NOT EXISTS 'msteams'; UPDATE metrics @@ -93,7 +235,8 @@ $$ and column_name = 'is_predefined') THEN -- 0. change metric_of UPDATE metrics - SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''),left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1)) + SET metric_of=coalesce(replace(get_global_key(metric_of), '"', ''), + left(metric_of, 1) || right(replace(initcap(metric_of), '_', ''), -1)) WHERE not is_predefined; -- 1. pre transform structure @@ -482,4 +625,4 @@ COMMIT; CREATE INDEX CONCURRENTLY IF NOT EXISTS clicks_selector_idx ON events.clicks (selector); CREATE INDEX CONCURRENTLY IF NOT EXISTS clicks_path_idx ON events.clicks (path); CREATE INDEX CONCURRENTLY IF NOT EXISTS clicks_path_gin_idx ON events.clicks USING GIN (path gin_trgm_ops); -CREATE INDEX CONCURRENTLY IF NOT EXISTS issues_project_id_issue_id_idx ON public.issues (project_id, issue_id); \ No newline at end of file +CREATE INDEX CONCURRENTLY IF NOT EXISTS issues_project_id_issue_id_idx ON public.issues (project_id, issue_id); From 58bb783c0c982fd55a25c352b8b5b9f1606332c9 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 23 Feb 2023 18:08:09 +0100 Subject: [PATCH 141/151] chore(helm): Adding support for global env variables --- .../charts/alerts/templates/deployment.yaml | 4 +++ .../charts/assets/templates/deployment.yaml | 4 +++ .../charts/assist/templates/deployment.yaml | 4 +++ .../charts/chalice/templates/deployment.yaml | 4 +++ .../charts/db/templates/deployment.yaml | 4 +++ .../charts/ender/templates/deployment.yaml | 4 +++ .../charts/frontend/templates/deployment.yaml | 4 +++ .../heuristics/templates/deployment.yaml | 4 +++ .../charts/http/templates/deployment.yaml | 4 +++ .../integrations/templates/deployment.yaml | 4 +++ .../charts/peers/templates/deployment.yaml | 4 +++ .../charts/quickwit/templates/deployment.yaml | 4 +++ .../charts/sink/templates/deployment.yaml | 4 +++ .../sourcemapreader/templates/deployment.yaml | 4 +++ .../charts/storage/templates/deployment.yaml | 4 +++ .../helmcharts/openreplay/templates/job.yaml | 28 +++++++++++++++++++ scripts/helmcharts/openreplay/values.yaml | 2 ++ 17 files changed, 90 insertions(+) diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml index 4afed4367..d4c1d6e49 100644 --- a/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml @@ -116,6 +116,10 @@ spec: value: '{{ .Values.global.email.emailSslCert }}' - name: EMAIL_FROM value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := 
.Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml index f66479475..f959adc13 100644 --- a/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml @@ -94,6 +94,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml index e153e50c3..92ae9a93c 100644 --- a/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/assist/templates/deployment.yaml @@ -75,6 +75,10 @@ spec: {{- end }} - name: REDIS_URL value: {{ .Values.global.redis.redisHost }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml index 29d311a25..a15553a8a 100644 --- a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml @@ -134,6 +134,10 @@ spec: value: '{{ .Values.global.email.emailSslCert }}' - name: EMAIL_FROM value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml index 63182fbac..90e971c8d 100644 --- a/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml @@ -69,6 +69,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml index e5b0a946b..fec4a808d 100644 --- a/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml @@ -61,6 +61,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ 
.Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml index e5eb29441..f685b76bc 100644 --- a/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/frontend/templates/deployment.yaml @@ -101,6 +101,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml index 6d88fec7a..f545ff77f 100644 --- a/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/heuristics/templates/deployment.yaml @@ -50,6 +50,10 @@ spec: - name: KAFKA_USE_SSL value: '{{ .Values.global.kafka.kafkaUseSsl }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml index 9f7d407bb..1add28054 100644 --- a/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml @@ -101,6 +101,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml index 0f9ead73c..522316d81 100644 --- a/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml @@ -61,6 +61,10 @@ spec: - name: POSTGRES_STRING value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml 
b/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml index 2cbd395d9..98c290708 100644 --- a/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/peers/templates/deployment.yaml @@ -54,6 +54,10 @@ spec: {{- else }} value: {{ .Values.global.s3.accessKey }} {{- end }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml index 3ac58c215..34c9ddd73 100644 --- a/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/quickwit/templates/deployment.yaml @@ -57,6 +57,10 @@ spec: value: {{ .Values.global.s3.secretKey }} - name: QW_DATA_DIR value: /opt/openreplay/ + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} ports: {{- range $key, $val := .Values.service.ports }} - name: {{ $key }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml index 7381541a1..88bd89c1f 100644 --- a/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml @@ -70,6 +70,10 @@ spec: value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml index 7abca821c..1d8041c5b 100644 --- a/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/sourcemapreader/templates/deployment.yaml @@ -79,6 +79,10 @@ spec: # S3 compatible storage value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}' {{- end }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml index 9cb2cca22..aff40a227 100644 --- a/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml +++ b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml @@ -78,6 +78,10 @@ spec: - name: KAFKA_USE_SSL value: '{{ .Values.global.kafka.kafkaUseSsl }}' {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} {{- range $key, $val := .Values.env }} - name: {{ $key }} value: '{{ $val }}' diff --git a/scripts/helmcharts/openreplay/templates/job.yaml b/scripts/helmcharts/openreplay/templates/job.yaml index 095232a7d..3e0494d7f 100644 --- a/scripts/helmcharts/openreplay/templates/job.yaml +++ b/scripts/helmcharts/openreplay/templates/job.yaml @@ -35,6 +35,10 @@ spec: - 
name: git image: alpine/git env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: ENTERPRISE_EDITION_LICENSE value: "{{ .Values.global.enterpriseEditionLicense }}" command: @@ -107,6 +111,10 @@ spec: {{- else }} value: '{{ .Values.global.postgresql.postgresqlPassword }}' {{- end}} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} image: bitnami/postgresql:13.3.0-debian-10-r53 command: - /bin/bash @@ -122,6 +130,10 @@ spec: - name: minio image: bitnami/minio:2020.10.9-debian-10-r6 env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: UPGRADE_FRONTENT @@ -152,6 +164,10 @@ spec: {{- if .Values.vault.enabled }} - name: vault env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: PGHOST @@ -177,6 +193,10 @@ spec: mountPath: /opt/migrations/ - name: vault-s3-upload env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: AWS_ACCESS_KEY_ID value: "{{ .Values.global.s3.accessKey }}" - name: AWS_SECRET_ACCESS_KEY @@ -221,6 +241,10 @@ spec: - name: clickhouse image: clickhouse/clickhouse-server:22.12-alpine env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: FORCE_MIGRATION value: "{{ .Values.forceMigration }}" - name: PREVIOUS_APP_VERSION @@ -248,6 +272,10 @@ spec: - name: kafka image: bitnami/kafka:2.6.0-debian-10-r30 env: + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} - name: RETENTION_TIME value: "{{ .Values.global.kafka.retentionTime }}" - name: KAFKA_HOST diff --git a/scripts/helmcharts/openreplay/values.yaml b/scripts/helmcharts/openreplay/values.yaml index f601d22da..694585180 100644 --- a/scripts/helmcharts/openreplay/values.yaml +++ b/scripts/helmcharts/openreplay/values.yaml @@ -37,3 +37,5 @@ global: vault: *vault redis: *redis clusterDomain: "svc.cluster.local" + # In case you've http proxy to access internet. 
+ env: {} From 594385290a834885d44e8356d87133a7d68df7e0 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 24 Feb 2023 15:12:58 +0100 Subject: [PATCH 142/151] feat(DB): added missing column --- ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql | 1 + scripts/schema/db/init_dbs/postgresql/init_schema.sql | 1 + 2 files changed, 2 insertions(+) diff --git a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql index c9cc4f87d..0b2945b39 100644 --- a/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql +++ b/ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql @@ -747,6 +747,7 @@ $$ metric_value text[] NOT NULL DEFAULT '{}'::text[], metric_format text, thumbnail text, + is_pinned boolean NOT NULL DEFAULT FALSE, default_config jsonb NOT NULL DEFAULT '{ "col": 2, "row": 2, diff --git a/scripts/schema/db/init_dbs/postgresql/init_schema.sql b/scripts/schema/db/init_dbs/postgresql/init_schema.sql index 350c5cbdb..57dea2a58 100644 --- a/scripts/schema/db/init_dbs/postgresql/init_schema.sql +++ b/scripts/schema/db/init_dbs/postgresql/init_schema.sql @@ -854,6 +854,7 @@ $$ metric_value text[] NOT NULL DEFAULT '{}'::text[], metric_format text, thumbnail text, + is_pinned boolean NOT NULL DEFAULT FALSE, default_config jsonb NOT NULL DEFAULT '{ "col": 2, "row": 2, From 15ad4313fce0764705c0dd50717c3e4d8d2dc99a Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Fri, 24 Feb 2023 15:22:18 +0100 Subject: [PATCH 143/151] feat(chalice): changed update dashboard response --- api/chalicelib/core/dashboards.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/api/chalicelib/core/dashboards.py b/api/chalicelib/core/dashboards.py index 93c2b8675..10143a5a4 100644 --- a/api/chalicelib/core/dashboards.py +++ b/api/chalicelib/core/dashboards.py @@ -114,17 +114,19 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo row = cur.fetchone() offset = row["count"] pg_query = f"""UPDATE dashboards - SET name = %(name)s, + SET name = %(name)s, description= %(description)s {", is_public = %(is_public)s" if data.is_public is not None else ""} {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""} - WHERE dashboards.project_id = %(projectId)s + WHERE dashboards.project_id = %(projectId)s AND dashboard_id = %(dashboard_id)s - AND (dashboards.user_id = %(userId)s OR is_public)""" + AND (dashboards.user_id = %(userId)s OR is_public) + RETURNING dashboard_id,name,description,is_public,createdAt;""" if data.metrics is not None and len(data.metrics) > 0: pg_query = f"""WITH dash AS ({pg_query}) - INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config) - VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};""" + INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config) + VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])} + RETURNING dash.*;""" for i, m in enumerate(data.metrics): params[f"metric_id_{i}"] = m # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \ @@ -134,8 +136,9 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo params[f"config_{i}"] = json.dumps({"position": i + offset}) 
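# The CTE built above is meant to update the dashboard and hand the updated
# row back in the same round trip, so the endpoint can respond without a
# second query. A minimal, self-contained sketch of that PostgreSQL pattern
# with a psycopg2-style cursor (hypothetical table and params, not the
# project's exact query):
#
#   query = """WITH dash AS (
#                  UPDATE dashboards
#                  SET name = %(name)s
#                  WHERE dashboard_id = %(dashboard_id)s
#                  RETURNING dashboard_id, name, created_at)
#              SELECT * FROM dash;"""
#   cur.execute(cur.mogrify(query, {"name": "Checkout KPIs", "dashboard_id": 1}))
#   row = cur.fetchone()  # updated row, or None if nothing matched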
From 15ad4313fce0764705c0dd50717c3e4d8d2dc99a Mon Sep 17 00:00:00 2001
From: Taha Yassine Kraiem
Date: Fri, 24 Feb 2023 15:22:18 +0100
Subject: [PATCH 143/151] feat(chalice): changed update dashboard response

---
 api/chalicelib/core/dashboards.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/api/chalicelib/core/dashboards.py b/api/chalicelib/core/dashboards.py
index 93c2b8675..10143a5a4 100644
--- a/api/chalicelib/core/dashboards.py
+++ b/api/chalicelib/core/dashboards.py
@@ -114,17 +114,19 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
         row = cur.fetchone()
         offset = row["count"]
         pg_query = f"""UPDATE dashboards
-                       SET name = %(name)s,
+                       SET name = %(name)s, description= %(description)s
                            {", is_public = %(is_public)s" if data.is_public is not None else ""}
                            {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
-                       WHERE dashboards.project_id = %(projectId)s
+                       WHERE dashboards.project_id = %(projectId)s
                          AND dashboard_id = %(dashboard_id)s
-                         AND (dashboards.user_id = %(userId)s OR is_public)"""
+                         AND (dashboards.user_id = %(userId)s OR is_public)
+                       RETURNING dashboard_id,name,description,is_public,createdAt;"""
         if data.metrics is not None and len(data.metrics) > 0:
             pg_query = f"""WITH dash AS ({pg_query})
-                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
-                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};"""
+                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
+                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
+                           RETURNING dash.*;"""
             for i, m in enumerate(data.metrics):
                 params[f"metric_id_{i}"] = m
                 # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \
                 params[f"config_{i}"] = json.dumps({"position": i + offset})

         cur.execute(cur.mogrify(pg_query, params))
+        row = cur.fetchone()

-    return {"success": True}
+    return helper.dict_to_camel_case(row)


 def get_widget(project_id, user_id, dashboard_id, widget_id):

From 54fa17dc3f2eccba1270b97e4a95a30f8bd0d3e5 Mon Sep 17 00:00:00 2001
From: Taha Yassine Kraiem
Date: Fri, 24 Feb 2023 15:31:57 +0100
Subject: [PATCH 144/151] feat(chalice): changed update dashboard response

---
 api/chalicelib/core/dashboards.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/api/chalicelib/core/dashboards.py b/api/chalicelib/core/dashboards.py
index 10143a5a4..89f56176b 100644
--- a/api/chalicelib/core/dashboards.py
+++ b/api/chalicelib/core/dashboards.py
@@ -121,7 +121,7 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
                        WHERE dashboards.project_id = %(projectId)s
                          AND dashboard_id = %(dashboard_id)s
                          AND (dashboards.user_id = %(userId)s OR is_public)
-                       RETURNING dashboard_id,name,description,is_public,createdAt;"""
+                       RETURNING dashboard_id,name,description,is_public,created_at;"""
         if data.metrics is not None and len(data.metrics) > 0:
             pg_query = f"""WITH dash AS ({pg_query})
                            INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
@@ -137,7 +137,8 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
         cur.execute(cur.mogrify(pg_query, params))
         row = cur.fetchone()
-
+    if row:
+        row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
     return helper.dict_to_camel_case(row)
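PATCHES 143/144 fold the widget attachment into the dashboard update through a CTE. For readers unfamiliar with the pattern, a simplified sketch with hypothetical ids and config — and one caveat: PostgreSQL's INSERT ... RETURNING can only name columns of the rows it inserted, so the updated dashboard row is conventionally surfaced by a final SELECT over the CTE:

    WITH dash AS (
        UPDATE dashboards
            SET name = 'Checkout funnel'
            WHERE dashboard_id = 42
            RETURNING dashboard_id, name, description, is_public, created_at),
         widgets AS (
             INSERT INTO dashboard_widgets (dashboard_id, metric_id, user_id, config)
                 VALUES (42, 7, 1, '{"position": 0}'::jsonb))
    SELECT *
    FROM dash;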
From 0aa5dbb4ac84e5e7a7e65988b61e8d99620068cc Mon Sep 17 00:00:00 2001
From: nick-delirium
Date: Fri, 24 Feb 2023 15:59:16 +0100
Subject: [PATCH 145/151] change(ui): remove additional calls to api after
 dashb update

---
 frontend/app/mstore/dashboardStore.ts | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/frontend/app/mstore/dashboardStore.ts b/frontend/app/mstore/dashboardStore.ts
index bd8681c2f..5d96173ff 100644
--- a/frontend/app/mstore/dashboardStore.ts
+++ b/frontend/app/mstore/dashboardStore.ts
@@ -189,19 +189,19 @@ export default class DashboardStore {
     return new Promise((resolve, reject) => {
       dashboardService
         .saveDashboard(dashboard)
-        .then((_dashboard) => {
+        .then((_dashboard: any) => {
           runInAction(() => {
             if (isCreating) {
               toast.success('Dashboard created successfully');
               this.addDashboard(new Dashboard().fromJson(_dashboard));
             } else {
               toast.success('Dashboard successfully updated ');
-              this.updateDashboard(new Dashboard().fromJson(_dashboard));
+              this.syncDashboardInfo(_dashboard.dashboardId!, _dashboard);
             }
             resolve(_dashboard);
           });
         })
-        .catch((error) => {
+        .catch(() => {
           toast.error('Error saving dashboard');
           reject();
         })
@@ -213,6 +213,14 @@ export default class DashboardStore {
     });
   }

+  syncDashboardInfo(id: string, info: { name: string, description: string, isPublic: boolean, createdAt: number }) {
+    if (this.selectedDashboard !== null) {
+      this.selectedDashboard.update(info)
+      const index = this.dashboards.findIndex((d) => d.dashboardId === id);
+      Object.assign(this.dashboards[index], info)
+    }
+  }
+
   saveMetric(metric: Widget, dashboardId: string): Promise<any> {
     const isCreating = !metric.widgetId;
     return dashboardService.saveMetric(metric, dashboardId).then((metric) => {

From fb5ba3e932750e196152fd9cc89a99b6f425ed1c Mon Sep 17 00:00:00 2001
From: Taha Yassine Kraiem
Date: Fri, 24 Feb 2023 17:42:57 +0100
Subject: [PATCH 146/151] feat(assist): changed image

---
 utilities/Dockerfile | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/utilities/Dockerfile b/utilities/Dockerfile
index 8f4d98549..84b54c906 100644
--- a/utilities/Dockerfile
+++ b/utilities/Dockerfile
@@ -1,14 +1,11 @@
 FROM node:18-alpine
 LABEL Maintainer="KRAIEM Taha Yassine"
-ARG GIT_SHA
-LABEL GIT_SHA=$GIT_SHA
-RUN apk add --no-cache tini git libc6-compat && ln -s /lib/libc.musl-x86_64.so.1 /lib/ld-linux-x86-64.so.2
+RUN apk add --no-cache tini
 ARG envarg
 ENV ENTERPRISE_BUILD=${envarg} \
     MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
     PRIVATE_ENDPOINTS=false \
-    GIT_SHA=$GIT_SHA \
     LISTEN_PORT=9001
 WORKDIR /work
 COPY package.json .
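After PATCH 146 the assist image carries only tini on top of node:18-alpine; the GIT_SHA stamping and the musl compatibility symlink are gone. For reference, a hypothetical build/run pair (the tag and `envarg` value are illustrative):

    docker build --build-arg envarg=ee -t openreplay/assist:local utilities/
    docker run --rm -p 9001:9001 openreplay/assist:local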
"https://registry.npmjs.org/socket.io/-/socket.io-4.6.1.tgz", + "integrity": "sha512-KMcaAi4l/8+xEjkRICl6ak8ySoxsYG+gG6/XfRCPJPQ/haCRIJBTL4wIl8YCsmtaBovcAXGLOShyVWQ/FG8GZA==", "dependencies": { "accepts": "~1.3.4", "base64id": "~2.0.0", "debug": "~4.3.2", - "engine.io": "~6.4.0", + "engine.io": "~6.4.1", "socket.io-adapter": "~2.5.2", "socket.io-parser": "~4.2.1" }, diff --git a/utilities/package-lock.json b/utilities/package-lock.json index 91cb862d2..aba9e43fe 100644 --- a/utilities/package-lock.json +++ b/utilities/package-lock.json @@ -1,11 +1,11 @@ { - "name": "utilities-server", + "name": "assist-server", "version": "1.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "utilities-server", + "name": "assist-server", "version": "1.0.0", "license": "Elastic License 2.0 (ELv2)", "dependencies": { @@ -45,9 +45,9 @@ } }, "node_modules/@types/node": { - "version": "18.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", - "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==" + "version": "18.14.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.1.tgz", + "integrity": "sha512-QH+37Qds3E0eDlReeboBxfHbX9omAcBCXEzswCu6jySP642jiM3cYSIkU/REqwhCUqXdonHFuBfJDiAJxMNhaQ==" }, "node_modules/accepts": { "version": "1.3.8", @@ -254,9 +254,9 @@ } }, "node_modules/engine.io": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.0.tgz", - "integrity": "sha512-OgxY1c/RuCSeO/rTr8DIFXx76IzUUft86R7/P7MMbbkuzeqJoTNw2lmeD91IyGz41QYleIIjWeMJGgug043sfQ==", + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.1.tgz", + "integrity": "sha512-JFYQurD/nbsA5BSPmbaOSLa3tSVj8L6o4srSwXXY3NqE+gGUNmmPTbhn8tjzcCtSqhFgIeqef81ngny8JM25hw==", "dependencies": { "@types/cookie": "^0.4.1", "@types/cors": "^2.8.12", @@ -862,14 +862,14 @@ } }, "node_modules/socket.io": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.0.tgz", - "integrity": "sha512-b65bp6INPk/BMMrIgVvX12x3Q+NqlGqSlTuvKQWt0BUJ3Hyy3JangBl7fEoWZTXbOKlCqNPbQ6MbWgok/km28w==", + "version": "4.6.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.1.tgz", + "integrity": "sha512-KMcaAi4l/8+xEjkRICl6ak8ySoxsYG+gG6/XfRCPJPQ/haCRIJBTL4wIl8YCsmtaBovcAXGLOShyVWQ/FG8GZA==", "dependencies": { "accepts": "~1.3.4", "base64id": "~2.0.0", "debug": "~4.3.2", - "engine.io": "~6.4.0", + "engine.io": "~6.4.1", "socket.io-adapter": "~2.5.2", "socket.io-parser": "~4.2.1" }, From ea8cf98beb3016a0f30b61322c96e9206ee35a4d Mon Sep 17 00:00:00 2001 From: Alex Kaminskii Date: Fri, 24 Feb 2023 17:59:45 +0100 Subject: [PATCH 148/151] feat(frontend): use ResourceTiming from file instead of database --- .../Session_/Network/Network.DEPRECATED.js | 148 ------------------ .../Session_/Network/NetworkContent.js | 14 +- .../app/components/Session_/Network/index.js | 2 - .../DevTools/NetworkPanel/NetworkPanel.tsx | 18 +-- .../FetchDetailsModal/FetchDetailsModal.tsx | 4 +- frontend/app/player/web/MessageManager.ts | 20 +-- frontend/app/player/web/WebPlayer.ts | 3 +- frontend/app/player/web/types.ts | 47 ------ frontend/app/player/web/types/index.ts | 2 + frontend/app/player/web/types/log.ts | 23 +++ frontend/app/player/web/types/resource.ts | 114 ++++++++++++++ frontend/app/types/session/resource.ts | 103 ------------ frontend/app/types/session/session.ts | 14 -- .../app/types/synthetics/domBuildingTime.js | 14 -- 
frontend/app/types/synthetics/index.js | 27 ---- frontend/app/utils/index.ts | 7 - 16 files changed, 165 insertions(+), 395 deletions(-) delete mode 100644 frontend/app/components/Session_/Network/Network.DEPRECATED.js delete mode 100644 frontend/app/components/Session_/Network/index.js delete mode 100644 frontend/app/player/web/types.ts create mode 100644 frontend/app/player/web/types/index.ts create mode 100644 frontend/app/player/web/types/log.ts create mode 100644 frontend/app/player/web/types/resource.ts delete mode 100644 frontend/app/types/session/resource.ts delete mode 100644 frontend/app/types/synthetics/domBuildingTime.js delete mode 100644 frontend/app/types/synthetics/index.js diff --git a/frontend/app/components/Session_/Network/Network.DEPRECATED.js b/frontend/app/components/Session_/Network/Network.DEPRECATED.js deleted file mode 100644 index b79307431..000000000 --- a/frontend/app/components/Session_/Network/Network.DEPRECATED.js +++ /dev/null @@ -1,148 +0,0 @@ -import React from 'react'; -import cn from 'classnames'; -import { connectPlayer, } from 'Player'; -import { Tooltip, TextEllipsis } from 'UI'; -import { getRE } from 'App/utils'; -import { TYPES } from 'Types/session/resource'; -import stl from './network.module.css'; -import NetworkContent from './NetworkContent'; -import { connect } from 'react-redux'; -import { setTimelinePointer } from 'Duck/sessions'; - -const ALL = 'ALL'; -const XHR = 'xhr'; -const JS = 'js'; -const CSS = 'css'; -const IMG = 'img'; -const MEDIA = 'media'; -const OTHER = 'other'; - -const TAB_TO_TYPE_MAP = { - [XHR]: TYPES.XHR, - [JS]: TYPES.JS, - [CSS]: TYPES.CSS, - [IMG]: TYPES.IMG, - [MEDIA]: TYPES.MEDIA, - [OTHER]: TYPES.OTHER, -}; - -export function renderName(r) { - return ( -
-export function renderName(r) {
-  return (
-    <Tooltip
-      title={
-        <div>{r.url}</div>
-      }
-    >
-      <TextEllipsis>{r.name}</TextEllipsis>
-    </Tooltip>
-  );
-}
-
-export function renderDuration(r) {
-  if (!r.success) return 'x';
-
-  const text = `${Math.round(r.duration)}ms`;
-  if (!r.isRed && !r.isYellow) return text;
-
-  let tooltipText;
-  let className = 'w-full h-full flex items-center ';
-  if (r.isYellow) {
-    tooltipText = 'Slower than average';
-    className += 'warn color-orange';
-  } else {
-    tooltipText = 'Much slower than average';
-    className += 'error color-red';
-  }
-
-  return (
-    <Tooltip title={tooltipText}>
-      <div className={className}>{text}</div>
-    </Tooltip>
-  );
-}
-
-@connectPlayer((state) => ({
-  location: state.location,
-  resources: state.resourceList,
-  domContentLoadedTime: state.domContentLoadedTime,
-  loadTime: state.loadTime,
-  // time: state.time,
-  playing: state.playing,
-  domBuildingTime: state.domBuildingTime,
-  fetchPresented: state.fetchList.length > 0,
-  listNow: state.resourceListNow,
-}))
-@connect(
-  (state) => ({
-    timelinePointer: state.getIn(['sessions', 'timelinePointer']),
-  }),
-  { setTimelinePointer }
-)
-export default class Network extends React.PureComponent {
-  state = {
-    filter: '',
-    filteredList: this.props.resources,
-    activeTab: ALL,
-    currentIndex: 0,
-  };
-
-  onRowClick = (e, index) => {
-    // no action for direct click on network requests (so far), there is a jump button, and we don't have more information for than is already displayed in the table
-  };
-
-  onTabClick = (activeTab) => this.setState({ activeTab });
-
-  onFilterChange = (e, { value }) => {
-    const { resources } = this.props;
-    const filterRE = getRE(value, 'i');
-    const filtered = resources.filter(
-      ({ type, name }) =>
-        filterRE.test(name) && (activeTab === ALL || type === TAB_TO_TYPE_MAP[activeTab])
-    );
-
-    this.setState({ filter: value, filteredList: value ? filtered : resources, currentIndex: 0 });
-  };
-
-  static getDerivedStateFromProps(nextProps, prevState) {
-    const { filteredList } = prevState;
-    if (nextProps.timelinePointer) {
-      const activeItem = filteredList.find((r) => r.time >= nextProps.timelinePointer.time);
-      return {
-        currentIndex: activeItem ? filteredList.indexOf(activeItem) : filteredList.length - 1,
-      };
-    }
-  }
-
-  render() {
-    const { location, domContentLoadedTime, loadTime, domBuildingTime, fetchPresented, listNow } =
-      this.props;
-    const { filteredList } = this.state;
-    const resourcesSize = filteredList.reduce(
-      (sum, { decodedBodySize }) => sum + (decodedBodySize || 0),
-      0
-    );
-    const transferredSize = filteredList.reduce(
-      (sum, { headerSize, encodedBodySize }) => sum + (headerSize || 0) + (encodedBodySize || 0),
-      0
-    );
-
-    return (
-      <NetworkContent ... />
-    );
-  }
-}
diff --git a/frontend/app/components/Session_/Network/NetworkContent.js b/frontend/app/components/Session_/Network/NetworkContent.js
index 5718fb55e..f55f5e407 100644
--- a/frontend/app/components/Session_/Network/NetworkContent.js
+++ b/frontend/app/components/Session_/Network/NetworkContent.js
@@ -2,7 +2,7 @@ import React from 'react';
 import cn from 'classnames';
 import { QuestionMarkHint, Tooltip, Tabs, Input, NoContent, Icon, Toggler } from 'UI';
 import { getRE } from 'App/utils';
-import { TYPES } from 'Types/session/resource';
+import { ResourceType } from 'Player';
 import { formatBytes } from 'App/utils';
 import { formatMs } from 'App/date';
@@ -21,12 +21,12 @@ const MEDIA = 'media';
 const OTHER = 'other';

 const TAB_TO_TYPE_MAP = {
-  [XHR]: TYPES.XHR,
-  [JS]: TYPES.JS,
-  [CSS]: TYPES.CSS,
-  [IMG]: TYPES.IMG,
-  [MEDIA]: TYPES.MEDIA,
-  [OTHER]: TYPES.OTHER,
+  [XHR]: ResourceType.XHR,
+  [JS]: ResourceType.SCRIPT,
+  [CSS]: ResourceType.CSS,
+  [IMG]: ResourceType.IMG,
+  [MEDIA]: ResourceType.MEDIA,
+  [OTHER]: ResourceType.OTHER,
 };
 const TABS = [ALL, XHR, JS, CSS, IMG, MEDIA, OTHER].map((tab) => ({
   text: tab,
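With `TYPES` gone, NetworkContent's tab filter now compares against the player's `ResourceType` enum. Conceptually (a compressed sketch of the existing filter, reusing `TAB_TO_TYPE_MAP` from the diff; not new behaviour):

    const matchesTab = (res: { name: string; type: ResourceType }, tab: string, re: RegExp) =>
      re.test(res.name) && (tab === ALL || res.type === TAB_TO_TYPE_MAP[tab]);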
diff --git a/frontend/app/components/shared/DevTools/NetworkPanel/NetworkPanel.tsx b/frontend/app/components/shared/DevTools/NetworkPanel/NetworkPanel.tsx
index 9339b1e8b..b7207ddef 100644
--- a/frontend/app/components/shared/DevTools/NetworkPanel/NetworkPanel.tsx
+++ b/frontend/app/components/shared/DevTools/NetworkPanel/NetworkPanel.tsx
@@ -3,7 +3,7 @@ import { observer } from 'mobx-react-lite';
 import { Duration } from 'luxon';

 import { Tooltip, Tabs, Input, NoContent, Icon, Toggler } from 'UI';
-import { TYPES } from 'Types/session/resource';
+import { ResourceType } from 'Player';
 import { formatBytes } from 'App/utils';
 import { formatMs } from 'App/date';
 import { useModal } from 'App/components/Modal';
@@ -28,13 +28,13 @@ const MEDIA = 'media';
 const OTHER = 'other';

 const TYPE_TO_TAB = {
-  [TYPES.XHR]: XHR,
-  [TYPES.FETCH]: XHR,
-  [TYPES.JS]: JS,
-  [TYPES.CSS]: CSS,
-  [TYPES.IMG]: IMG,
-  [TYPES.MEDIA]: MEDIA,
-  [TYPES.OTHER]: OTHER,
+  [ResourceType.XHR]: XHR,
+  [ResourceType.FETCH]: XHR,
+  [ResourceType.SCRIPT]: JS,
+  [ResourceType.CSS]: CSS,
+  [ResourceType.IMG]: IMG,
+  [ResourceType.MEDIA]: MEDIA,
+  [ResourceType.OTHER]: OTHER,
 }

 const TAP_KEYS = [ALL, XHR, JS, CSS, IMG, MEDIA, OTHER] as const;
@@ -154,7 +154,7 @@ function NetworkPanel({ startedAt }: { startedAt: number }) {
   const activeIndex = devTools[INDEX_KEY].index;

   const list = useMemo(() =>
-    // TODO: better merge (with body size info)
+    // TODO: better merge (with body size info) - do it in player
     resourceList.filter(res => !fetchList.some(ft => {
       // res.url !== ft.url doesn't work on relative URLs appearing within fetchList (to-fix in player)
       if (res.name !== ft.name) { return false }
diff --git a/frontend/app/components/shared/FetchDetailsModal/FetchDetailsModal.tsx b/frontend/app/components/shared/FetchDetailsModal/FetchDetailsModal.tsx
index 49d6536bc..09a08ae16 100644
--- a/frontend/app/components/shared/FetchDetailsModal/FetchDetailsModal.tsx
+++ b/frontend/app/components/shared/FetchDetailsModal/FetchDetailsModal.tsx
@@ -1,7 +1,7 @@
 import React, { useEffect, useState } from 'react';
 import FetchBasicDetails from './components/FetchBasicDetails';
 import { Button } from 'UI';
-import { TYPES } from 'Types/session/resource';
+import { ResourceType } from 'Player';
 import FetchTabs from './components/FetchTabs/FetchTabs';
 import { useStore } from 'App/mstore';
 import { DateTime } from 'luxon';
@@ -17,7 +17,7 @@ function FetchDetailsModal(props: Props) {
   const [resource, setResource] = useState(props.resource);
   const [first, setFirst] = useState(false);
   const [last, setLast] = useState(false);
-  const isXHR = resource.type === TYPES.XHR || resource.type === TYPES.FETCH
+  const isXHR = resource.type === ResourceType.XHR || resource.type === ResourceType.FETCH
   const {
     sessionStore: { devTools },
     settingsStore: { sessionSettings: { timezone }},
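The sharpened TODO above ("do it in player") refers to deduplicating `resourceList` (ResourceTiming entries) against `fetchList` (fetch/xhr entries). A minimal sketch of such a player-side merge — the name comparison comes from the diff, the timestamp tolerance is an assumption:

    // keep a ResourceTiming entry only when no fetch/xhr entry
    // plausibly describes the same request
    const mergeLists = <T extends { name?: string; time: number }>(
      resourceList: T[],
      fetchList: T[],
    ): T[] =>
      resourceList.filter(
        (res) => !fetchList.some(
          (ft) => ft.name === res.name && Math.abs(ft.time - res.time) < 250,
        ),
      );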
diff --git a/frontend/app/player/web/MessageManager.ts b/frontend/app/player/web/MessageManager.ts
index 68ef0cbf8..72c07c445 100644
--- a/frontend/app/player/web/MessageManager.ts
+++ b/frontend/app/player/web/MessageManager.ts
@@ -2,9 +2,9 @@
 import { Decoder } from "syncod";
 import logger from 'App/logger';

-import Resource, { TYPES as RES_TYPES } from 'Types/session/resource';
 import { TYPES as EVENT_TYPES } from 'Types/session/event';
-import { Log } from './types';
+import { Log } from './types/log';
+import { Resource, ResourceType, getResourceFromResourceTiming, getResourceFromNetworkRequest } from './types/resource'

 import { toast } from 'react-toastify';
@@ -395,19 +395,13 @@ export default class MessageManager {
           Log(msg)
         )
         break;
+      case MType.ResourceTiming:
+        // TODO: merge `resource` and `fetch` lists into one here instead of UI
+        this.lists.lists.resource.insert(getResourceFromResourceTiming(msg, this.sessionStart))
+        break;
       case MType.Fetch:
       case MType.NetworkRequest:
-        this.lists.lists.fetch.insert(new Resource({
-          method: msg.method,
-          url: msg.url,
-          request: msg.request,
-          response: msg.response,
-          status: msg.status,
-          duration: msg.duration,
-          type: msg.type === "xhr" ? RES_TYPES.XHR : RES_TYPES.FETCH,
-          time: Math.max(msg.timestamp - this.sessionStart, 0), // !!! doesn't look good. TODO: find solution to show negative timings
-          index,
-        }) as Timed)
+        this.lists.lists.fetch.insert(getResourceFromNetworkRequest(msg, this.sessionStart))
         break;
       case MType.Redux:
         decoded = this.decodeStateMessage(msg, ["state", "action"]);
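To make the new routing concrete: a ResourceTiming message is now mapped in the player rather than pre-joined in the database. Roughly (the shape follows the diff; the field values are made up):

    const sessionStart = 1677254400000
    const msg = {
      timestamp: 1677254400450,
      duration: 120,
      initiator: 'img',
      url: 'https://app.example.com/assets/logo.svg',
      // ...remaining ResourceTiming fields
    }
    // getResourceFromResourceTiming(msg, sessionStart) yields, roughly:
    // { type: 'img', method: 'GET', success: true, status: '2xx-3xx', time: 450, name: 'logo.svg', ... }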
diff --git a/frontend/app/player/web/WebPlayer.ts b/frontend/app/player/web/WebPlayer.ts
index d94d10beb..d1a56f9fd 100644
--- a/frontend/app/player/web/WebPlayer.ts
+++ b/frontend/app/player/web/WebPlayer.ts
@@ -1,4 +1,4 @@
-import { Log, LogLevel } from './types'
+import { Log, LogLevel } from './types/log'
 import type { Store } from 'App/player'

 import Player from '../player/Player'
@@ -30,7 +30,6 @@ export default class WebPlayer extends Player {
     let initialLists = live ? {} : {
       event: session.events || [],
       stack: session.stackEvents || [],
-      resource: session.resources || [], // MBTODO: put ResourceTiming in file
       exceptions: session.errors?.map(({ name, ...rest }: any) =>
         Log({
           level: LogLevel.ERROR,
diff --git a/frontend/app/player/web/types.ts b/frontend/app/player/web/types.ts
deleted file mode 100644
index 485921ed0..000000000
--- a/frontend/app/player/web/types.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-export enum LogLevel {
-  INFO = 'info',
-  LOG = 'log',
-  //ASSERT = 'assert', //?
-  WARN = 'warn',
-  ERROR = 'error',
-  EXCEPTION = 'exception',
-}
-
-export interface ILog {
-  level: LogLevel
-  value: string
-  time: number
-  index?: number
-  errorId?: string
-}
-
-export const Log = (log: ILog) => ({
-  isRed: log.level === LogLevel.EXCEPTION || log.level === LogLevel.ERROR,
-  isYellow: log.level === LogLevel.WARN,
-  ...log
-})
-
-
-
-
-// func getResourceType(initiator string, URL string) string {
-// 	switch initiator {
-// 	case "xmlhttprequest", "fetch":
-// 		return "fetch"
-// 	case "img":
-// 		return "img"
-// 	default:
-// 		switch getURLExtention(URL) {
-// 		case "css":
-// 			return "stylesheet"
-// 		case "js":
-// 			return "script"
-// 		case "png", "gif", "jpg", "jpeg", "svg":
-// 			return "img"
-// 		case "mp4", "mkv", "ogg", "webm", "avi", "mp3":
-// 			return "media"
-// 		default:
-// 			return "other"
-// 		}
-// 	}
-// }
\ No newline at end of file
diff --git a/frontend/app/player/web/types/index.ts b/frontend/app/player/web/types/index.ts
new file mode 100644
index 000000000..2f5f8e464
--- /dev/null
+++ b/frontend/app/player/web/types/index.ts
@@ -0,0 +1,2 @@
+export * from './log'
+export * from './resource'
\ No newline at end of file
diff --git a/frontend/app/player/web/types/log.ts b/frontend/app/player/web/types/log.ts
new file mode 100644
index 000000000..22a20d33c
--- /dev/null
+++ b/frontend/app/player/web/types/log.ts
@@ -0,0 +1,23 @@
+export const enum LogLevel {
+  INFO = 'info',
+  LOG = 'log',
+  //ASSERT = 'assert', //?
+  WARN = 'warn',
+  ERROR = 'error',
+  EXCEPTION = 'exception',
+}
+
+export interface ILog {
+  level: LogLevel
+  value: string
+  time: number
+  index?: number
+  errorId?: string
+}
+
+export const Log = (log: ILog) => ({
+  isRed: log.level === LogLevel.EXCEPTION || log.level === LogLevel.ERROR,
+  isYellow: log.level === LogLevel.WARN,
+  ...log
+})
+
diff --git a/frontend/app/player/web/types/resource.ts b/frontend/app/player/web/types/resource.ts
new file mode 100644
index 000000000..032790e48
--- /dev/null
+++ b/frontend/app/player/web/types/resource.ts
@@ -0,0 +1,114 @@
+import type { ResourceTiming, NetworkRequest, Fetch } from '../messages'
+
+export const enum ResourceType {
+  XHR = 'xhr',
+  FETCH = 'fetch',
+  SCRIPT = 'script',
+  CSS = 'css',
+  IMG = 'img',
+  MEDIA = 'media',
+  OTHER = 'other',
+}
+
+function getURLExtention(url: string): string {
+  const pts = url.split(".")
+  return pts[pts.length-1] || ""
+}
+
+// maybe move this thing to the tracker
+function getResourceType(initiator: string, url: string): ResourceType {
+  switch (initiator) {
+  case "xmlhttprequest":
+  case "fetch":
+    return ResourceType.FETCH
+  case "img":
+    return ResourceType.IMG
+  default:
+    switch (getURLExtention(url)) {
+    case "css":
+      return ResourceType.CSS
+    case "js":
+      return ResourceType.SCRIPT
+    case "png":
+    case "gif":
+    case "jpg":
+    case "jpeg":
+    case "svg":
+      return ResourceType.IMG
+    case "mp4":
+    case "mkv":
+    case "ogg":
+    case "webm":
+    case "avi":
+    case "mp3":
+      return ResourceType.MEDIA
+    default:
+      return ResourceType.OTHER
+    }
+  }
+}
+
+function getResourceName(url: string) {
+  return url
+    .split('/')
+    .filter((s) => s !== '')
+    .pop();
+}
+
+
+const YELLOW_BOUND = 10;
+const RED_BOUND = 80;
+
+
+interface IResource {
+  //index: number,
+  time: number,
+  type: ResourceType,
+  url: string,
+  status: string,
+  method: string,
+  duration: number,
+  success: boolean,
+  ttfb?: number,
+  request?: string,
+  response?: string,
+  headerSize?: number,
+  encodedBodySize?: number,
+  decodedBodySize?: number,
+  responseBodySize?: number,
+}
+
+
+export const Resource = (resource: IResource) => ({
+  ...resource,
+  name: getResourceName(resource.url),
+  isRed: !resource.success, //|| resource.score >= RED_BOUND,
+  isYellow: false, // resource.score < RED_BOUND && resource.score >= YELLOW_BOUND,
+})
+
+
+export function getResourceFromResourceTiming(msg: ResourceTiming, sessStart: number) {
+  const success = msg.duration > 0 // might be duration=0 when cached
+  const type = getResourceType(msg.initiator, msg.url)
+  return Resource({
+    ...msg,
+    type,
+    method: type === ResourceType.FETCH ? ".." : "GET", // should be GET for all non-XHR/Fetch resources, right?
+    success,
+    status: success ? '2xx-3xx' : '4xx-5xx',
+    time: Math.max(0, msg.timestamp - sessStart)
+  })
+}
+
+export function getResourceFromNetworkRequest(msg: NetworkRequest | Fetch, sessStart: number) {
+  return Resource({
+    ...msg,
+    // @ts-ignore
+    type: msg?.type === "xhr" ? ResourceType.XHR : ResourceType.FETCH,
+    success: msg.status < 400,
+    status: String(msg.status),
+    time: Math.max(0, msg.timestamp - sessStart),
+  })
+}
+
+
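The internal `getResourceType` helper decides the type purely from the initiator plus the URL extension; for instance (illustrative URLs):

    getResourceType('fetch', 'https://api.example.com/v1/users')  // ResourceType.FETCH
    getResourceType('link',  'https://cdn.example.com/main.css')  // ResourceType.CSS
    getResourceType('video', 'https://cdn.example.com/intro.mp4') // ResourceType.MEDIA
    getResourceType('other', 'https://example.com/report.pdf')    // ResourceType.OTHER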
diff --git a/frontend/app/types/session/resource.ts b/frontend/app/types/session/resource.ts
deleted file mode 100644
index 7e09e8f02..000000000
--- a/frontend/app/types/session/resource.ts
+++ /dev/null
@@ -1,103 +0,0 @@
-import Record from 'Types/Record';
-import { getResourceName } from 'App/utils';
-
-const XHR = 'xhr' as const;
-const FETCH = 'fetch' as const;
-const JS = 'script' as const;
-const CSS = 'css' as const;
-const IMG = 'img' as const;
-const MEDIA = 'media' as const;
-const OTHER = 'other' as const;
-
-function getResourceStatus(status: number, success: boolean) {
-  if (status !== undefined) return String(status);
-  if (typeof success === 'boolean' || typeof success === 'number') {
-    return !!success
-      ? '2xx-3xx'
-      : '4xx-5xx';
-  }
-  return '2xx-3xx';
-}
-
-export const TYPES = {
-  XHR,
-  FETCH,
-  JS,
-  CSS,
-  IMG,
-  MEDIA,
-  OTHER,
-  "stylesheet": CSS,
-}
-
-const YELLOW_BOUND = 10;
-const RED_BOUND = 80;
-
-export function isRed(r: IResource) {
-  return !r.success || r.score >= RED_BOUND;
-}
-
-interface IResource {
-  type: keyof typeof TYPES,
-  url: string,
-  name: string,
-  status: number,
-  duration: number,
-  index: number,
-  time: number,
-  ttfb: number,
-  timewidth: number,
-  success: boolean,
-  score: number,
-  method: string,
-  request: string,
-  response: string,
-  headerSize: number,
-  encodedBodySize: number,
-  decodedBodySize: number,
-  responseBodySize: number,
-  timings: Record<string, number>
-  datetime: number
-  timestamp: number
-}
-
-export default class Resource {
-  name = 'Resource'
-  type: IResource["type"]
-  status: string
-  success: IResource["success"]
-  time: IResource["time"]
-  ttfb: IResource["ttfb"]
-  url: IResource["url"]
-  duration: IResource["duration"]
-  index: IResource["index"]
-  timewidth: IResource["timewidth"]
-  score: IResource["score"]
-  method: IResource["method"]
-  request: IResource["request"]
-  response: IResource["response"]
-  headerSize: IResource["headerSize"]
-  encodedBodySize: IResource["encodedBodySize"]
-  decodedBodySize: IResource["decodedBodySize"]
-  responseBodySize: IResource["responseBodySize"]
-  timings: IResource["timings"]
-
-  constructor({ status, success, time, datetime, timestamp, timings, ...resource }: IResource) {
-
-    // adjusting for 201, 202 etc
-    const reqSuccess = 300 > status || success
-    Object.assign(this, {
-      ...resource,
-      name: getResourceName(resource.url),
-      status: getResourceStatus(status, success),
-      success: reqSuccess,
-      time: typeof time === 'number' ? time : datetime || timestamp,
-      ttfb: timings && timings.ttfb,
-      timewidth: timings && timings.timewidth,
-      timings,
-      isRed: !reqSuccess || resource.score >= RED_BOUND,
-      isYellow: resource.score < RED_BOUND && resource.score >= YELLOW_BOUND,
-    })
-  }
-}
-
diff --git a/frontend/app/types/session/session.ts b/frontend/app/types/session/session.ts
index 4dcef8d49..3b254ae4b 100644
--- a/frontend/app/types/session/session.ts
+++ b/frontend/app/types/session/session.ts
@@ -1,7 +1,6 @@
 import { Duration } from 'luxon';
 import SessionEvent, { TYPES, EventData, InjectedEvent } from './event';
 import StackEvent from './stackEvent';
-import Resource from './resource';
 import SessionError, { IError } from './error';
 import Issue, { IIssue } from './issue';
 import { Note } from 'App/services/NotesService'
@@ -31,8 +30,6 @@ export interface ISession {
   duration: number,
   events: InjectedEvent[],
   stackEvents: StackEvent[],
-  resources: Resource[],
-  missedResources: Resource[],
   metadata: [],
   favorite: boolean,
   filterId?: string,
@@ -119,7 +116,6 @@ export default class Session {
   duration: ISession["duration"]
   events: ISession["events"]
   stackEvents: ISession["stackEvents"]
-  resources: ISession["resources"]
   metadata: ISession["metadata"]
   favorite: ISession["favorite"]
   filterId?: ISession["filterId"]
@@ -181,7 +177,6 @@ export default class Session {
       devtoolsURL = [],
       mobsUrl = [],
       notes = [],
-      resources = [],
       ...session
     } = sessionData
     const duration = Duration.fromMillis(session.duration < 1000 ? 1000 : session.duration);
@@ -208,13 +203,6 @@ export default class Session {
       })
     }

-    let resourcesList = resources.map((r) => new Resource(r as any));
-    resourcesList.forEach((r: Resource) => {
-      r.time = Math.max(0, r.time - startedAt)
-    })
-    resourcesList = resourcesList.sort((r1, r2) => r1.time - r2.time);
-    const missedResources = resourcesList.filter(({ success }) => !success);
-
     const stackEventsList: StackEvent[] = []
     if (stackEvents?.length || session.userEvents?.length) {
       const mergedArrays = [...stackEvents, ...session.userEvents]
@@ -245,8 +233,6 @@ export default class Session {
       siteId: projectId,
       events,
       stackEvents: stackEventsList,
-      resources: resourcesList,
-      missedResources,
       userDevice,
       userDeviceType,
       isMobile,
diff --git a/frontend/app/types/synthetics/domBuildingTime.js b/frontend/app/types/synthetics/domBuildingTime.js
deleted file mode 100644
index 258902f60..000000000
--- a/frontend/app/types/synthetics/domBuildingTime.js
+++ /dev/null
@@ -1,14 +0,0 @@
-import { Record } from 'immutable';
-
-const DomBuildingTime = Record({
-  avg: undefined,
-  chart: [],
-});
-
-
-function fromJS(data = {}) {
-  if (data instanceof DomBuildingTime) return data;
-  return new DomBuildingTime(data);
-}
-
-export default fromJS;
\ No newline at end of file
diff --git a/frontend/app/types/synthetics/index.js b/frontend/app/types/synthetics/index.js
deleted file mode 100644
index 1cec8da81..000000000
--- a/frontend/app/types/synthetics/index.js
+++ /dev/null
@@ -1,27 +0,0 @@
-import { getChartFormatter } from './helper';
-import DomBuildingTime from './domBuildingTime';
-
-export const WIDGET_LIST = [
-  {
-    key: "resourcesLoadingTime",
-    name: "Resource Fetch Time",
-    description: 'List of resources that are slowing down your website, sorted by the number of impacted sessions.',
-    thumb: 'na.png',
-    type: 'resources',
-    dataWrapper: (list, period) => DomBuildingTime(list)
-      .update("chart", getChartFormatter(period))
-  },
-];
-
-export const WIDGET_KEYS = WIDGET_LIST.map(({ key }) => key);
-
-const WIDGET_MAP = {};
-WIDGET_LIST.forEach(w => { WIDGET_MAP[ w.key ] = w; });
-
-const OVERVIEW_WIDGET_MAP = {};
-WIDGET_LIST.filter(w => w.type === 'overview').forEach(w => { OVERVIEW_WIDGET_MAP[ w.key ] = w; });
-
-export {
-  WIDGET_MAP,
-  OVERVIEW_WIDGET_MAP
-};
diff --git a/frontend/app/utils/index.ts b/frontend/app/utils/index.ts
index b1ae63f43..47b40aa70 100644
--- a/frontend/app/utils/index.ts
+++ b/frontend/app/utils/index.ts
@@ -17,13 +17,6 @@ export function debounce(callback, wait, context = this) {
   };
 }

-export function getResourceName(url: string) {
-  return url
-    .split('/')
-    .filter((s) => s !== '')
-    .pop();
-}
-
 /* eslint-disable no-mixed-operators */
 export function randomInt(a, b) {
   const min = (b ? a : 0) - 0.5;

From 6d4b797ab5a8bbd866247ffe7f609a170f468cf0 Mon Sep 17 00:00:00 2001
From: Alex Kaminskii
Date: Fri, 24 Feb 2023 18:04:08 +0100
Subject: [PATCH 149/151] fix(player): ignore query in file extension parsing

---
 frontend/app/player/web/types/resource.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/app/player/web/types/resource.ts b/frontend/app/player/web/types/resource.ts
index 032790e48..c1b48ce9d 100644
--- a/frontend/app/player/web/types/resource.ts
+++ b/frontend/app/player/web/types/resource.ts
@@ -11,7 +11,7 @@ export const enum ResourceType {
 }

 function getURLExtention(url: string): string {
-  const pts = url.split(".")
+  const pts = url.split("?")[0].split(".")
   return pts[pts.length-1] || ""
 }

From e4392a1bf8b3f518eea4fa1bccd20b6beffa8451 Mon Sep 17 00:00:00 2001
From: Taha Yassine Kraiem
Date: Fri, 24 Feb 2023 18:07:17 +0100
Subject: [PATCH 150/151] feat(assist): changed image

---
 utilities/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utilities/Dockerfile b/utilities/Dockerfile
index 84b54c906..edbaae03c 100644
--- a/utilities/Dockerfile
+++ b/utilities/Dockerfile
@@ -18,4 +18,4 @@ USER 1001
 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE

 ENTRYPOINT ["/sbin/tini", "--"]
-CMD npm start
\ No newline at end of file
+CMD npm start

From 081cb778cf131faa665f414d1d3b2e491423c69c Mon Sep 17 00:00:00 2001
From: Taha Yassine Kraiem
Date: Fri, 24 Feb 2023 18:17:45 +0100
Subject: [PATCH 151/151] feat(assist): upgrade changes

---
 ee/utilities/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ee/utilities/Dockerfile b/ee/utilities/Dockerfile
index 08ccba56f..3119b5eed 100644
--- a/ee/utilities/Dockerfile
+++ b/ee/utilities/Dockerfile
@@ -18,4 +18,4 @@ USER 1001
 ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-Country.mmdb $MAXMINDDB_FILE

 ENTRYPOINT ["/sbin/tini", "--"]
-CMD npm start
+CMD npm start
\ No newline at end of file
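Looking back at PATCH 149: that one-line change matters for typing, because a cache-busting query string used to leak into the parsed extension and push assets into OTHER (illustrative URL):

    getURLExtention('https://cdn.example.com/main.css?v=3')
    // before: url.split('.')               → 'css?v=3' → ResourceType.OTHER
    // after:  url.split('?')[0].split('.') → 'css'     → ResourceType.CSS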