Compare commits


236 commits

Author SHA1 Message Date
Alexander
e0cd1994fd feat(assist): correct 'empty list error' handler 2025-05-13 17:23:53 +02:00
Alexander
33e2e8efb9 feat(assist): project's cache fix 2025-05-13 17:01:25 +02:00
Alexander
27f3ec5585 feat(assist): redis consumer fix 2025-05-13 16:48:37 +02:00
Alexander
6c44970666 feat(assist): event insertion 2025-05-13 16:40:46 +02:00
Alexander
3177ac7229 feat(backend): upgrade redis lib to v9 2025-05-13 16:36:31 +02:00
Alexander
8dbf7d8893 feat(assist): added the assist stats part to the node.js app 2025-05-13 13:53:40 +02:00
Alexander
d1ed9564c2 feat(assist-server): removed helm chart for assist-server 2025-04-18 16:35:46 +02:00
Alexander
620fc05d6c feat(assist-server): removed old assist and moved assist-server into ee/assist/ 2025-04-18 16:32:34 +02:00
Alexander
985ce2812c feat(assist-api): added correct handlers for 403 and 404 on getByID 2025-04-18 16:24:00 +02:00
rjshrjndrn
685f784691 fix(make): distro
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-18 15:34:25 +02:00
Alexander
fd39e03fd1 feat(assist-api): hack for our best client 2025-04-18 15:34:25 +02:00
rjshrjndrn
82a29f8623 chore(ci): makefile 2025-04-18 15:34:22 +02:00
Alexander
48fdf2c9c9 feat(assist-api): removed copy-pasted envs 2025-04-18 15:33:01 +02:00
Alexander
e6d91430f2 feat(assist-api): added missing config 2025-04-18 15:33:01 +02:00
Alexander
73b7cbe261 feat(assist-server): short ts in logs 2025-04-18 15:33:01 +02:00
Alexander
27ed4ee6b4 feat(assist-api): moved some vars to env 2025-04-18 15:33:01 +02:00
Alexander
59251af8c6 feat(assist-server): slightly improved cacher 2025-04-18 15:33:01 +02:00
Alexander
3ff994490e feat(assist-server): try to use ioredis instead 2025-04-18 15:33:01 +02:00
Alexander
dfe9cd985d feat(assist-server): use sendCommand for SADD 2025-04-18 15:33:01 +02:00
Alexander
edafda9f97 feat(assist-server): fixed import 2025-04-18 15:33:01 +02:00
Alexander
c4e4c3d2cd feat(assist-server): better cache management 2025-04-18 15:33:01 +02:00
Alexander
f15e84086f feat(assist-server): removed useless method 2025-04-18 15:33:01 +02:00
Alexander
a7b91ddcad feat(assist-api): fixed total and counter calculation 2025-04-18 15:33:01 +02:00
Alexander
17a10da4de feat(assist-api): removed fields we do not use 2025-04-18 15:33:01 +02:00
Alexander
f659b24838 feat(assist-api): ignore sessionInfo for agents 2025-04-18 15:33:01 +02:00
Alexander
a4a5ce6498 feat(assist-api): ignore request's body for isLive and getByID 2025-04-18 15:33:01 +02:00
Alexander
7836153fca feat(assist-api): more optimised search method 2025-04-18 15:33:01 +02:00
Alexander
2e85fc953a feat(assist-api): correct response format for getByID 2025-04-18 15:33:01 +02:00
Alexander
ab1b5c19ec feat(assist-api): correct response format for autocomplete 2025-04-18 15:33:01 +02:00
Alexander
5708aa4b39 feat(assist-api): send parsed session's data to chalice 2025-04-18 15:33:01 +02:00
Alexander
fee79521a1 feat(assist-api): added data field to response 2025-04-18 15:33:01 +02:00
Alexander
25adc0410d feat(assist-api): added counter support 2025-04-18 15:33:00 +02:00
Alexander
1f411662c0 feat(assist-api): correctly handle empty filters 2025-04-18 15:33:00 +02:00
Alexander
bcb3b407e8 feat(assist-api): adapted body request parser to current chalice worker schema 2025-04-18 15:33:00 +02:00
Alexander
f25575a0a8 feat(assist-api): body req debug logs 2025-04-18 15:33:00 +02:00
Alexander
4cc30cdcea feat(assist-api): extra debug logs 2025-04-18 15:33:00 +02:00
Alexander
99b25420da feat(assist-api): disabled ingress 2025-04-18 15:33:00 +02:00
Alexander
7dc6c18520 feat(assist-api): extra path prefix 2025-04-18 15:33:00 +02:00
Alexander
31ff31d218 feat(assist-api): added default rules section to ingress 2025-04-18 15:33:00 +02:00
Alexander
8e4292c965 feat(assist-api): trying to fix ingress declaration 2025-04-18 15:33:00 +02:00
Alexander
3ef71f5044 feat(assist-api): fixed the http method 2025-04-18 15:33:00 +02:00
Alexander
8b617fcbd7 feat(assist-api): ingress enabled in helm chart 2025-04-18 15:33:00 +02:00
Alexander
72acf77a1b feat(assist-api): removed jwt auth and added assistKey support 2025-04-18 15:33:00 +02:00
Alexander
70a10ea1d6 feat(assist-api): extra debug logs 2025-04-18 15:33:00 +02:00
Alexander
7c6a52aa73 feat(assist-api): fixed the port number 2025-04-18 15:33:00 +02:00
Alexander
b7d2d9d77a Revert "feat(assist-api): no comments"
This reverts commit bd1c4bcdea22a5099a7f5ffb246fdec0bbd6576a.
2025-04-18 15:33:00 +02:00
Alexander
28cb2ba74c feat(assist-api): no comments 2025-04-18 15:33:00 +02:00
Alexander
8280c8754c feat(assist-api): fixed an possible panic 2025-04-18 15:33:00 +02:00
Alexander
b41248571e feat(assist-server): correct redis prefix 2025-04-18 15:33:00 +02:00
Alexander
b7df5c7f87 feat(assist-api): added debug level support for logger 2025-04-18 15:33:00 +02:00
Alexander
9c0f50b2fb feat(assist-api): added the missing config 2025-04-18 15:33:00 +02:00
Alexander
96f58b94d5 feat(assist-api): added the golang part 2025-04-18 15:33:00 +02:00
Alexander
df10fa706b feat(assist-server): removed always empty projectId field 2025-04-18 15:33:00 +02:00
Alexander
471b860841 feat(assist-server): correct .expire() call 2025-04-18 15:33:00 +02:00
Alexander
1b46863089 feat(assist-server): fixed an issue with TTL 2025-04-18 15:33:00 +02:00
Alexander
c103283766 feat(assist-server): use int ttl for cache 2025-04-18 15:33:00 +02:00
Alexander
373d71e4f3 feat(ch-connector): added current url for all events 2025-04-18 15:31:55 +02:00
nick-delirium
cde427ae4c
tracker: bump proxy version to .3, prevent crash on calling obscure fn on objects 2025-04-17 17:35:27 +02:00
nick-delirium
7cfef90cc8
ui: virtualizer for filter options list 2025-04-16 15:22:37 +02:00
nick-delirium
04db655776
ui: fix auto import paths 2025-04-16 15:07:37 +02:00
nick-delirium
b91f5df89f
ui: fix imports for eventsblock 2025-04-16 12:22:16 +02:00
nick-delirium
7fd741348c
ui: fix session search on url change 2025-04-16 11:55:47 +02:00
nick-delirium
2aaafa5b22
ui: fixing security warnings 2025-04-16 11:43:45 +02:00
nick-delirium
11f9b865cf
tracker: 16.1.3 with network proxy fix 2025-04-16 11:39:17 +02:00
rjshrjndrn
60a691bbaf chore(make): Adding make file
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-16 10:07:36 +02:00
Shekar Siri
3f1f6c03f2 feat(widget-sessions): improve session filtering logic
- Refactored session filtering logic to handle nested filters properly.
- Enhanced `fetchSessions` to ensure null checks and avoid errors.
- Updated `loadData` to handle `USER_PATH` and `HEATMAP` metric types.
- Improved UI consistency by adjusting spacing and formatting.
- Replaced redundant code with cleaner, more maintainable patterns.

This change improves the reliability and readability of the session
filtering and loading logic in the WidgetSessions component.
2025-04-15 18:15:23 +02:00
nick-delirium
dcd19e3c83
player: add debug methods (get node, get node messages) 2025-04-15 15:57:01 +02:00
nick-delirium
ced855568f
tracker: drop mentions of lint-staged 2025-04-15 14:42:55 +02:00
Andrey Babushkin
c8483df795
removed sorting by id (#3304) 2025-04-15 13:31:35 +02:00
Jorgen Evens
d544da0665 fix(helm): fix broken volumeMounts indentation 2025-04-14 15:51:55 +02:00
rjshrjndrn
408c3122d3 fix(clickhouse): update user config mount paths
Properly mount clickhouse user configuration files to the users.d
directory with correct paths for each file. Also adds several
performance-related settings to the default user profile including
query cache and JSON type support.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-14 15:37:55 +02:00
nick-delirium
c196736c3c
tracker: 16.1.2 networkProxy bump 2025-04-14 13:30:37 +02:00
Shekar Siri
d47542830f feat(SessionsBy): add specific filter for FETCH metric
Added a conditional check to handle the FETCH metric in the SessionsBy
component. When the metric is FETCH, a specific filter with key
FETCH_URL, operator is, and value derived from data.name is applied.
This ensures proper filtering behavior for FETCH-related metrics.
2025-04-14 12:00:02 +02:00
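
A minimal TypeScript sketch of the conditional this commit describes, for illustration only (the FETCH_URL key and "is" operator come from the commit message; the surrounding filter shape is an assumption, not the exact frontend types):

```typescript
// Hypothetical drill-down filter shape; not the real store types.
interface DrillDownFilter {
  key: string;       // e.g. 'FETCH_URL'
  operator: string;  // e.g. 'is'
  value: string[];
}

// When the SessionsBy metric is FETCH, build a FETCH_URL filter from the
// clicked row's name, as the commit describes; otherwise apply no special filter.
function buildSessionsByFilter(metric: string, data: { name: string }): DrillDownFilter | null {
  if (metric === 'FETCH') {
    return { key: 'FETCH_URL', operator: 'is', value: [data.name] };
  }
  return null;
}
```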
Andrey Babushkin
055ff8f64a
Assist remote canvas control (#3287)
* refactor(searchStore): reformat filterMap function parameters (#3166)

- Reformat the parameters of the filterMap function for better readability.
- Comment out the fetchSessions call in clearSearch method to avoid unnecessary session fetch.

* Increment frontend chart version (#3167)

Co-authored-by: GitHub Action <action@github.com>

* refactor(chalice): cleaned code
fix(chalice): fixed session-search-pg sortKey issue
fix(chalice): fixed CH-query-formatter to handle special chars
fix(chalice): fixed /ids response

* feat(auth): implement withCaptcha HOC for consistent reCAPTCHA (#3177)

* feat(auth): implement withCaptcha HOC for consistent reCAPTCHA

This commit refactors the reCAPTCHA implementation across the application
by introducing a Higher Order Component (withCaptcha) that encapsulates
captcha verification logic. The changes:

- Create a reusable withCaptcha HOC in withRecaptcha.tsx
- Refactor Login, ResetPasswordRequest, and CreatePassword components
- Extract SSOLogin into a separate component
- Improve error handling and user feedback
- Standardize loading and verification states across forms
- Make captcha implementation more maintainable and consistent

* feat(auth): support msaas edition for enterprise features

Add msaas to the isEnterprise check alongside ee edition to properly
display enterprise features. Use userStore.isEnterprise in SSOLogin
component instead of directly checking authDetails.edition for
consistent
enterprise status detection.

* Increment frontend chart version (#3179)

Co-authored-by: GitHub Action <action@github.com>

* feat(assist): improved caching mechanism for cluster mode (#3180)

* Increment assist chart version (#3181)

Co-authored-by: GitHub Action <action@github.com>

* ui: fix table column export

* Increment frontend chart version

* fix(auth): remove unnecessary captcha token validation (#3188)

The token validation checks were redundant as the validation is already
handled by the captcha wrapper component. This change simplifies the
password reset flow while maintaining security.

* Increment frontend chart version (#3189)

Co-authored-by: GitHub Action <action@github.com>

* ui: onboarding fixes

* ui: fixes for onboarding ui

* Increment frontend chart version

* feat(helm): add TOKEN_SECRET environment variable

Add TOKEN_SECRET environment variable to HTTP service deployment and
generate a random value for it in vars.yaml.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(GraphQL): remove unused useTranslation hook (#3200) (#3206)

Co-authored-by: PiRDub <pirddeveloppeur@gmail.com>

* Increment frontend chart version

* chore(http): remove default token_string

scripts/helmcharts/openreplay/charts/http/scripts/entrypoint.sh

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(dashboard): update filter condition in MetricsList

Change the filter type comparison from checking against 'all' to
checking against an empty string. This ensures proper filtering
behavior when filtering metrics in the dashboard component.

* Increment frontend chart version

* ui: shrink icons when no space, adjust player area for events export … (#3217)

* ui: shrink icons when no space, adjust player area for events export panel, fix panel size

* ui: rm log

* Increment frontend chart version

* refactor(chalice): changed user-journey

* Increment chalice chart version

* refactor(auth): separate SSO support from enterprise edition

Add dedicated isSSOSupported property to correctly identify when SSO
authentication is available, properly handling the 'msaas' edition
case separately from enterprise edition checks. This fixes SSO
visibility in the login interface.

* Increment frontend chart version

* UI patches (28.03) (#3231)

* ui: force getting url for location in tabmanagers

* Assist add turn servers (#3229)

* fixed conflicts

* add offers

* add config to sicket query

* add config to sicket query

* add config init

* removed console logs

* removed wrong updates

* fixed conflicts

* add offers

* add config to sicket query

* add config to sicket query

* add config init

* removed console logs

* removed wrong updates

* ui: fix chat draggable, fix default params

---------

Co-authored-by: nick-delirium <nikita@openreplay.com>

* ui: fix spritemap generation for assist sessions

* ui: fix yarnlock

* fix errors

* updated widget link

* resolved conflicts

* updated widget url

---------

Co-authored-by: Andrey Babushkin <55714097+reyand43@users.noreply.github.com>
Co-authored-by: Андрей Бабушкин <andreybabushkin2000@gmail.com>

* fix(init): remove duplicate clone

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* Increment assist chart version

* Increment frontend chart version

* ui: add old devtool filters

* ui: filter keys

* Increment frontend chart version

* ui: fix modules mapper

* ui: fix modules label

* Increment frontend chart version

* ui: fix double fetches for sessions

* Increment frontend chart version

* pulled updates (#3254)

* Increment frontend chart version (#3255)

Co-authored-by: GitHub Action <action@github.com>

* Increment assist chart version (#3256)

Co-authored-by: GitHub Action <action@github.com>

* feat(chalice): added for_spot=True for authenticate_sso (#3259)

* Increment chalice chart version (#3260)

Co-authored-by: GitHub Action <action@github.com>

* Assist patch canvas (#3265)

* add agent info to assist and tracker

* removed AGENTS_CONNECTED event

* Increment frontend chart version (#3266)

Co-authored-by: GitHub Action <action@github.com>

* Increment assist chart version (#3267)

Co-authored-by: GitHub Action <action@github.com>

* resolved conflict

* removed comments

* add global method support

* fix errors

* remove wrong updates

* remove wrong updates

* add onDrag as option

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: Shekar Siri <sshekarsiri@gmail.com>
Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com>
Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: Taha Yassine Kraiem <tahayk2@gmail.com>
Co-authored-by: Alexander <zavorotynskiy@pm.me>
Co-authored-by: nick-delirium <nikita@openreplay.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: PiRDub <pirddeveloppeur@gmail.com>
2025-04-14 11:25:17 +02:00
nick-delirium
2bf92f40f7
ui: metrics filtering checks 2025-04-14 10:53:12 +02:00
nick-delirium
f0f78341e7
networkProxy: improve sanitizer, fix bodyreader class 2025-04-14 10:53:12 +02:00
nick-delirium
dbb805189f ui: keep spot log 2025-04-14 09:41:11 +02:00
nick-delirium
e32dbe2ee2 ui: check if spot ext exists on login comp 2025-04-14 09:41:11 +02:00
rjshrjndrn
3272f5b9fd refactor(clickhouse): split server and user config
Split the ClickHouse configuration into separate ConfigMaps for server
and user configurations. This allows more granular management of the
different configuration types and proper mounting to their respective
paths.

- Created separate serverConfig and userConfig under configOverride
- Added user-default.xml under userConfig
- Updated StatefulSet to mount each ConfigMap separately

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-11 17:20:26 +02:00
Shekar Siri
ea4e2ab198 feat(search): enhance filter value handling
- Added `checkFilterValue` function to validate and update filter values
  in `SearchStoreLive`.
- Updated `FilterItem` to handle undefined `value` gracefully by providing
  a default empty array.

These changes improve robustness in filter value processing.
2025-04-11 14:35:19 +02:00
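
As an illustration of the behaviour described above, a sketch of a `checkFilterValue`-style guard (names and shapes are assumptions, not the exact `SearchStoreLive` API):

```typescript
// Returns a usable value list even when the stored filter value is missing,
// so components like FilterItem never see undefined.
function checkFilterValue(value: string[] | undefined | null): string[] {
  return Array.isArray(value) ? value : [];
}

// const values = checkFilterValue(filter.value); // always an array
```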
Shekar Siri
990e1fa1c4 feat(search): add rounding to next minutes for date ranges
- Introduced `roundToNextMinutes` utility function to round timestamps
  to the next specified minute interval.
- Updated `Search` class to use the rounding function for non-custom
  date ranges.
- Modified `getRange` in `period.js` to align LAST_24_HOURS with
  15-minute intervals.
- Added `roundToNextMinutes` implementation in `utils/index.ts`.
2025-04-11 11:59:04 +02:00
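
A sketch of what a rounding helper like the one described above could look like (illustrative; the actual `roundToNextMinutes` in `utils/index.ts` may differ in signature or edge-case handling):

```typescript
// Round a millisecond timestamp up to the next `minutes` boundary.
function roundToNextMinutes(timestamp: number, minutes: number): number {
  const interval = minutes * 60 * 1000; // interval length in milliseconds
  return Math.ceil(timestamp / interval) * interval;
}

// Example: align the end of a LAST_24_HOURS range to a 15-minute boundary.
const rangeEnd = roundToNextMinutes(Date.now(), 15);
```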
Shekar Siri
5ca97ceedd feat(dashboard): set initial drill down period
Change default drill down period from LAST_7_DAYS to LAST_24_HOURS
and preserve current period when drilling down on chart click
2025-04-11 10:47:32 +02:00
rjshrjndrn
d3b8c35058 chore(action): cloning specific tag
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-10 15:41:56 +02:00
rjshrjndrn
1b851a8b72 feat(clickhouse): add config override capability
Adds support for overriding ClickHouse server configurations by:
- Creating a new ConfigMap to store custom XML configurations
- Mounting the ConfigMap to ClickHouse pods under /etc/clickhouse-server/config.d
- Adding configOverride field to values.yaml with examples

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-09 16:36:04 +02:00
Andrey Babushkin
553e3f6045
Assist fix canvas clearing (#3276)
* add stop canvas socket event

* change tracker version

* removed comments
2025-04-07 14:10:31 +02:00
rjshrjndrn
3f73bae22f fix(helm): proper aws endpoint detection
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-04 23:44:08 +02:00
Alexander
9160b42113 feat(assist-server): fixed an issue with sessionIDs collector 2025-04-04 17:53:19 +02:00
Alexander
36e1a2fca2 feat(assist-server): removed unnecessary prefix for ws connections 2025-04-04 16:34:45 +02:00
Alexander
cbbd480cca feat(assist-server): changed default port 2025-04-04 16:23:16 +02:00
Alexander
77ae0cac0e Revert "feat(assist): temporary changed the default assist path"
This reverts commit 5771323800.
2025-04-04 16:18:19 +02:00
Alexander
5771323800 feat(assist): temporary changed the default assist path 2025-04-04 16:13:03 +02:00
Alexander
aab8691cf5 Merge remote-tracking branch 'origin/dev' into dev 2025-04-04 16:08:24 +02:00
Alexander
d9ff3f4691 feat(assist-server): use the default prefix url 2025-04-04 16:08:09 +02:00
rjshrjndrn
09c2ce0976 ci(action): Build and patch github tags
feat(workflow): update commit timestamp for patching

Add a step to set the commit timestamp of the HEAD commit to be 1
second newer than the oldest of the last 3 commits. This ensures
proper chronological order while preserving the commit content.

- Fetch deeper history to access commit history
- Get oldest timestamp from recent commits
- Set new commit date with BSD-compatible date command
- Verify timestamp change with git log

The workflow was previously checking out 'main' branch with a
comment indicating it needed to be fixed. This change makes it
properly checkout the tag specified by the workflow input.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-04 15:57:50 +02:00
Alexander
0141a42911 feat(assist-server): fixed the helm chart 2025-04-04 15:48:31 +02:00
Alexander
b55e44d450 feat(assist-server): moved the build.sh script to the root 2025-04-04 15:44:19 +02:00
Alexander
f70cce7e23 feat(assist-server): removed unnecessary comments 2025-04-04 15:13:45 +02:00
Alexander
8b3be469b6 feat(assist-server): added host configuration 2025-04-04 15:09:37 +02:00
Alexander
dc975bc19a feat(actions): small fix in assist-server action 2025-04-04 12:11:48 +02:00
Alexander
c1d51b98a2
feat(assist-server): added a first part of the assist v2 (#3269) 2025-04-04 12:05:36 +02:00
nick-delirium
5a51bfb984
update codecov yml 2025-04-04 10:46:13 +02:00
Andrey Babushkin
b55b9e5515
Assist fix canvas stream (#3263)
* add agent info to assist and tracker

* removed AGENTS_CONNECTED event
2025-04-03 18:06:09 +02:00
Andrey Babushkin
af7b46516f
Assist fix canvas stream (#3261)
* add agent info to assist and tracker

* removed AGENTS_CONNECTED event
2025-04-03 16:14:46 +02:00
rjshrjndrn
05e0306823 fix(actions): add dynamic token secret
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-03 16:10:19 +02:00
Alexander
77a8371543 feat(analytics): added mock (because it's impossible to build at the moment) 2025-04-03 15:33:48 +02:00
Mehdi Osman
e4406ad26b
Update .env.sample 2025-04-03 09:06:31 -04:00
Alexander
a8971d842b feat(chalice): added for_spot=True for authenticate_sso 2025-04-02 16:38:08 +02:00
nick-delirium
c003057cf0
ui: fix events filtering, net panel scroll and default tab 2025-04-02 14:40:13 +02:00
nick-delirium
586472c7dd
ui: bump tab tooltip delay 2025-04-01 17:16:25 +02:00
nick-delirium
ecb192f16e
tracker: hoist deps to root level 2025-04-01 11:49:39 +02:00
nick-delirium
6dc585417f
tracker: fix tests (use workflow) 2025-04-01 11:40:06 +02:00
nick-delirium
264444c92a
tracker: setup bun workspaces for tracker/assist 2025-04-01 11:35:42 +02:00
nick-delirium
b2fcd7094b
tracker: patch for potential empty call_end msg #3249 2025-04-01 11:05:42 +02:00
Andrey Babushkin
f3b98dad8a
updated version (#3253) 2025-03-31 18:09:27 +02:00
Andrey Babushkin
c27213c65d
add test turn (#3236)
* add test turn

* removed stun

* add ice candidates buffer and removed config to another socket event

* removed config from NEW_AGENTS

* changed WEBRTC_CONFIG event receiver

* fixed error

* fixed errors

* add buffer cleaning
2025-03-31 18:00:27 +02:00
nick-delirium
f61c5e99b5
ui: fix double fetches for sessions 2025-03-31 17:14:23 +02:00
nick-delirium
6412f14b08
ui: fix modules label 2025-03-31 11:52:23 +02:00
nick-delirium
0a620c6ba3
ui: fix modules mapper 2025-03-31 11:47:10 +02:00
nick-delirium
685741f039
tracker: yarn -> bun 2025-03-31 11:15:38 +02:00
nick-delirium
4ee78e1a5c
ui: filter keys 2025-03-31 10:33:51 +02:00
nick-delirium
77735d9d72
ui: use metadata as filter on click 2025-03-31 10:29:27 +02:00
nick-delirium
e3065e0530 ui: add old devtool filters 2025-03-31 10:11:34 +02:00
rjshrjndrn
d9d4221ad3 fix(init): remove duplicate clone
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-28 21:40:58 +01:00
nick-delirium
0bbde3e75a
tracker: assist 11.0.4; pass peer creds 2025-03-28 17:55:26 +01:00
nick-delirium
7dec8bb943
ui: fix toast auto close 2025-03-28 17:26:50 +01:00
Taha Yassine Kraiem
c6a5ed6c3b fix(chalice): fixed redundant event-names 2025-03-28 17:19:36 +01:00
Taha Yassine Kraiem
99d62fa549 feat(chalice): support regex operator for heatmaps 2025-03-28 16:53:49 +01:00
Taha Yassine Kraiem
c0bb05bc0f feat(chalice): support regex operator for sessions search 2025-03-28 16:53:49 +01:00
Taha Yassine Kraiem
70258e5c1d refactor(chalice): simplified supportedTypes for product analytics 2025-03-28 16:53:49 +01:00
Taha Yassine Kraiem
6ec146b24b feat(chalice): support regex for events search 2025-03-28 16:53:49 +01:00
Taha Yassine Kraiem
9f464e3b41 refactor(chalice): refactored code 2025-03-28 16:53:49 +01:00
nick-delirium
e95bdab478 ui: fix spritemap generation for assist sessions 2025-03-28 16:42:16 +01:00
Andrey Babushkin
421b3d1dc5
Assist add turn servers (#3229)
* fixed conflicts

* add offers

* add config to sicket query

* add config to sicket query

* add config init

* removed console logs

* removed wrong updates

* fixed conflicts

* add offers

* add config to sicket query

* add config to sicket query

* add config init

* removed console logs

* removed wrong updates

* ui: fix chat draggable, fix default params

---------

Co-authored-by: nick-delirium <nikita@openreplay.com>
2025-03-28 16:27:01 +01:00
nick-delirium
437a25fb97
networkProxy: update dev deps 2025-03-28 11:20:15 +01:00
nick-delirium
cb55a17227
ui: force getting url for location in tabmanagers 2025-03-28 10:57:39 +01:00
Taha Yassine Kraiem
9d160abda5 refactor(chalice): optimized search sessions using user-events 2025-03-27 16:34:49 +01:00
Taha Yassine Kraiem
3758cf6565 refactor(chalice): search sessions using user-events 2025-03-27 14:18:08 +01:00
Taha Yassine Kraiem
9db5e2a8f7 refactor(chalice): refactored code 2025-03-27 14:18:08 +01:00
Taha Yassine Kraiem
e0dba41065 refactor(chalice): upgraded dependencies 2025-03-27 14:18:08 +01:00
Taha Yassine Kraiem
8fbaf25799 feat(DB): use incremental&refreshable materialized views to fill extra tables 2025-03-27 14:18:08 +01:00
Shekar Siri
65072f607f refactor(auth): separate SSO support from enterprise edition
Add dedicated isSSOSupported property to correctly identify when SSO
authentication is available, properly handling the 'msaas' edition
case separately from enterprise edition checks. This fixes SSO
visibility in the login interface.
2025-03-27 12:28:37 +01:00
nick-delirium
cb4bf932c4
ui: fix fresh sessions lookup 2025-03-27 10:57:37 +01:00
nick-delirium
20b938365c
ui: minor session list fixes 2025-03-27 10:43:30 +01:00
Taha Yassine Kraiem
8e68ebd52b refactor(chalice): changed user-journey
(cherry picked from commit fc86555644)
2025-03-27 10:25:47 +01:00
nick-delirium
293382ea85
tracker: 16.1.1 2025-03-27 09:34:12 +01:00
nick-delirium
ac35bf5179
tracker: assist 11.0.3 clicks fix 2025-03-26 17:47:14 +01:00
nick-delirium
eb610d1c21 tracker: fix remote control clicks on svg 2025-03-26 17:42:27 +01:00
Delirium
ac0ccb2169
ui: shrink icons when no space, adjust player area for events export … (#3217)
* ui: shrink icons when no space, adjust player area for events export panel, fix panel size

* ui: rm log
2025-03-26 16:37:45 +01:00
Taha Yassine Kraiem
20a57d7ca1 feat(chalice): initial lexicon for events & properties 2025-03-26 13:27:42 +01:00
Taha Yassine Kraiem
856e716507 refactor(chalice): changed product analytics to return full filters with possible types 2025-03-26 13:27:42 +01:00
Taha Yassine Kraiem
bb17f672fe feat(DB): use incremental&refreshable materialized views to fill extra tables 2025-03-26 13:27:42 +01:00
nick-delirium
d087736df0
ui: fix default series state, fix metricType in comparison 2025-03-26 10:05:03 +01:00
Shekar Siri
ce546bcfa3 fix(ui): adjust CirclePlay icon spacing in player controls
Add marginLeft style property to eliminate unwanted spacing between
the text and icon in the "Play Full Session" button, improving the
visual alignment and consistency of the player controls.
2025-03-25 18:36:27 +01:00
Shekar Siri
9f681aca45 fix(dashboard): update filter condition in MetricsList
Change the filter type comparison from checking against 'all' to
checking against an empty string. This ensures proper filtering
behavior when filtering metrics in the dashboard component.
2025-03-25 18:08:29 +01:00
Taha Yassine Kraiem
0500f30d14 feat(DB): use incremental materialized views to fill extra tables
refactor(chalice): changed product analytics
2025-03-25 17:44:31 +01:00
Taha Yassine Kraiem
ec2c42c688 refactor(DB): changed product analytics DB structure 2025-03-25 17:44:31 +01:00
Taha Yassine Kraiem
7f0bc100f5 refactor(chalice): changed product analytics search payload 2025-03-25 17:44:31 +01:00
Taha Yassine Kraiem
522a985ef3 refactor(chalice): refactored pagination-query-string 2025-03-25 17:44:31 +01:00
nick-delirium
634d0e8a0f ui: rm speed index card 2025-03-25 17:39:14 +01:00
nick-delirium
28b4fc7598 ui: upgrade react to 19.0.0 2025-03-25 17:39:14 +01:00
Alexander
0d4c256ca8 feat(tasks): removed unnecessary wrapper 2025-03-25 17:16:57 +01:00
Alexander
35f63a8fb1 feat(dbpool): fixed an issue in metrics call 2025-03-25 17:02:06 +01:00
nick-delirium
a4e96822ed
spot: skip saas check for ingest 2025-03-25 16:52:48 +01:00
Alexander
96f984a76a feat(spot): fixed an issue in metrics call 2025-03-25 16:46:21 +01:00
nick-delirium
5f15dfafe7 ui: auto detect ingest for spot (if not cloud) 2025-03-25 16:05:36 +01:00
nick-delirium
b9cca6b388
spot: restore currtime after thumbnail 2025-03-25 15:44:07 +01:00
nick-delirium
712f07988e spot: fix deps 2025-03-25 15:39:02 +01:00
nick-delirium
08bddb3165 switch meta tag to mp4 2025-03-25 15:39:02 +01:00
nick-delirium
3efb879cdf spot: up audio bitrate a bit 2025-03-25 15:39:02 +01:00
nick-delirium
ccf44fda70 spot: try mp4 support with avc1 2025-03-25 15:39:02 +01:00
nick-delirium
ce525a4ccf spot: more fixes for network, reinit stage for content script 2025-03-25 15:39:02 +01:00
nick-delirium
c6299c4592 spot: add err ctx, add iterator for values 2025-03-25 15:39:02 +01:00
nick-delirium
a371c79151 spot: more fixes for debugger approach, check settings before enabling network 2025-03-25 15:39:02 +01:00
nick-delirium
f59a8c24f4 spot: small refactoring + testing debugger for network capture 2025-03-25 15:39:02 +01:00
nick-delirium
8be6f63711 spot: .14
Signed-off-by: nick-delirium <nikita@openreplay.com>
2025-03-25 15:39:02 +01:00
nick-delirium
8ba35b1324 spot: mix network requests with webRequest data for better tracking 2025-03-25 15:39:02 +01:00
nick-delirium
28dea3b225
tracker: release 16.1.0 2025-03-25 15:17:43 +01:00
Andrey Babushkin
666643a6ae
Improve tracker perfomance (#3208)
* fix(helm): add CORS config to Assist ingress

Configure CORS headers and debug session information for the Assist
service's ingress to ensure proper cross-origin requests handling and
improved debugging capabilities.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* increase perfomance ticker and remove empty batches

* add commit

* updated Changelog

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-25 15:08:49 +01:00
nick-delirium
4cf688f15c spot: update network proxy for auto sanitizer 2025-03-25 14:52:43 +01:00
nick-delirium
1e57c90449 networkProxy: auto sanitize sensitive tokens 2025-03-25 14:52:43 +01:00
Alexander
c0678bab15 feat(db): insert current_url for errors and issues 2025-03-25 14:09:37 +01:00
rjshrjndrn
187a69a61a fix(assist): ingress session id
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-25 11:26:40 +01:00
rjshrjndrn
2e96a072e9 chore(http): remove default token_string
scripts/helmcharts/openreplay/charts/http/scripts/entrypoint.sh

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-24 19:33:49 +01:00
Andrey Babushkin
5a410e63b3
Update batch writer (#3205)
* fix(helm): add CORS config to Assist ingress

Configure CORS headers and debug session information for the Assist
service's ingress to ensure proper cross-origin requests handling and
improved debugging capabilities.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* add timestamp to prepare method

* update test

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-24 17:25:09 +01:00
Shekar Siri
300a857a5c fix(userStore): simplify error handling on save
Replace complex error parsing with direct error message display.
This improves code maintainability while still providing useful
feedback to users when saving their account data fails.
2025-03-24 17:14:14 +01:00
nick-delirium
eba22e0efa
ui: always show sessiontags 2025-03-24 17:12:18 +01:00
rjshrjndrn
664f6b9014 feat(helm): add TOKEN_SECRET environment variable
Add TOKEN_SECRET environment variable to HTTP service deployment and
generate a random value for it in vars.yaml.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-24 16:53:06 +01:00
nick-delirium
5bbd7cff10 tracker: change nodebound warn level 2025-03-24 15:09:03 +01:00
nick-delirium
6f172d4f01 tracker: keep spaces, remove data from page location msg 2025-03-24 15:09:03 +01:00
nick-delirium
829e1c8bde tracker: fix jest config, update test cases 2025-03-24 15:09:03 +01:00
nick-delirium
e7d309dadf tracker: "secure by default" mode; 16.1.0 2025-03-24 15:09:03 +01:00
nick-delirium
4bac12308a tracker: secure mode for sanitizer settings 2025-03-24 15:09:03 +01:00
nick-delirium
2aba1d9a52 ui: comments etc 2025-03-24 15:06:00 +01:00
nick-delirium
1f4e32e4f2 ui: improve network panel row mapping 2025-03-24 15:06:00 +01:00
nick-delirium
49f98967d6 ui: fixes for onboarding ui 2025-03-24 14:27:37 +01:00
PiRDub
356fa02094
fix(GraphQL): remove unused useTranslation hook (#3200) 2025-03-24 13:30:19 +01:00
Andrey Babushkin
a8e47e59ad
Update batch writer (#3198)
* fix(helm): add CORS config to Assist ingress

Configure CORS headers and debug session information for the Assist
service's ingress to ensure proper cross-origin requests handling and
improved debugging capabilities.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* add timestamp to prepare method

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-24 12:42:05 +01:00
nick-delirium
c760d29fb4
ui: update icon in langbanner 2025-03-24 11:10:43 +01:00
nick-delirium
d77a518cf0 ui: change language selection ui 2025-03-24 11:09:22 +01:00
Alexander
e04c2aa251 feat(ender): handle the negative duration sessions 2025-03-24 10:02:42 +01:00
rjshrjndrn
e6eb41536d fix(helm): improve session routing and CORS handling
- Add http-snippet with map function to extract sessionID from peerId
- Update ingress annotations for Assist chart
- Add proper CORS headers to support cross-origin requests
- Remove debugging headers that were previously enabled

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-22 20:10:52 +01:00
Mehdi Osman
4b3ad60565
Revert transport to websocket only 2025-03-22 13:55:45 -04:00
Mehdi Osman
90669b0604
Revert to websocket 2025-03-22 13:53:47 -04:00
Taha Yassine Kraiem
f4bf1b8960 fix(chalice): fixed wrong import 2025-03-21 16:58:34 +01:00
nick-delirium
70423c6d8e
experimental: only polling for assist 2025-03-21 16:38:43 +01:00
Taha Yassine Kraiem
ae313c17d4 feat(chalice): search product analytics 2025-03-21 16:22:49 +01:00
rjshrjndrn
0e45fa53ad fix(helm): add CORS config to Assist ingress
Configure CORS headers and debug session information for the Assist
service's ingress to ensure proper cross-origin requests handling and
improved debugging capabilities.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-21 16:06:41 +01:00
nick-delirium
fe20f83130
ui: add inuse error for signup 2025-03-21 15:50:25 +01:00
rjshrjndrn
d04e6686ca fix(helm): add CORS config to Assist ingress
Configure CORS headers and debug session information for the Assist
service's ingress to ensure proper cross-origin requests handling and
improved debugging capabilities.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-21 15:50:15 +01:00
Shekar Siri
6adb45e15f fix(auth): remove unnecessary captcha token validation
The token validation checks were redundant as the validation is already
handled by the captcha wrapper component. This change simplifies the
password reset flow while maintaining security.
2025-03-21 15:42:28 +01:00
Andrey Babushkin
a1337faeee
combine in 1 line (#3191) 2025-03-21 15:19:32 +01:00
nick-delirium
7e065ab02f
tracker: 16.0.3, fix local spritemap parsing 2025-03-21 15:10:00 +01:00
nick-delirium
1e2dde09b4
ui: onboarding fixes 2025-03-21 10:43:51 +01:00
nick-delirium
3cdfe76134
ui: add sessionId header for AssistManager.ts 2025-03-21 10:18:33 +01:00
nick-delirium
39855651d5
ui: use polling for first request 2025-03-21 09:52:00 +01:00
Taha Yassine Kraiem
dd469d2349 refactor(chalice): initial product analytics 2025-03-20 17:13:17 +01:00
Taha Yassine Kraiem
3d448320bf refactor(DB): changed DB structure for product analytics 2025-03-20 17:13:17 +01:00
Taha Yassine Kraiem
7b0771a581 refactor(chalice): upgraded dependencies 2025-03-20 17:13:17 +01:00
Taha Yassine Kraiem
988b396223 refactor(chalice): moved CH sessions-search to FOSS
refactor(DB): changed DB structures for CH sessions-search in FOSS
refactor(DB): preparing for v1.23.0
2025-03-20 17:13:17 +01:00
nick-delirium
fa3b585785
ui: fix table column export 2025-03-20 16:06:48 +01:00
Alexander
91e0ebeb56 feat(assist): improved caching mechanism for cluster mode 2025-03-20 13:52:14 +01:00
rjshrjndrn
8e68eb9a20 feat(assist): enhance WebSocket session persistence
Add session extraction from peerId parameter for better WebSocket
connection stability. This improves assist session routing by:

- Extracting sessionID from peerId parameter using regex
- Setting upstream hash-by to use the extracted session ID
- Adding debug headers to monitor session routing

TODO: Convert this to map

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-03-20 12:38:36 +01:00
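
The change itself is nginx/Helm ingress configuration, but the extraction it performs can be sketched in TypeScript for illustration, assuming a peerId of the form `<projectKey>-<sessionId>` (that format is an assumption here, not confirmed by the commit):

```typescript
// Pull the trailing numeric session ID out of a peerId so requests for the
// same session can be hashed to the same upstream, as the commit describes.
function extractSessionId(peerId: string): string | null {
  const match = peerId.match(/-(\d+)$/); // assumed peerId format
  return match ? match[1] : null;
}

// extractSessionId('projectKey-1234567890') === '1234567890'
```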
nick-delirium
13bd3d9121
tracker: add sessId header for assist polling 2025-03-20 12:13:40 +01:00
nick-delirium
048ae0913c
ui: refetch live session list on proj change 2025-03-19 17:36:33 +01:00
Shekar Siri
73fff8b817 feat(auth): support msaas edition for enterprise features
Add msaas to the isEnterprise check alongside ee edition to properly
display enterprise features. Use userStore.isEnterprise in SSOLogin
component instead of directly checking authDetails.edition for
consistent
enterprise status detection.
2025-03-19 14:40:05 +01:00
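
A sketch of the edition check this commit describes (edition values come from the message; the property and store names are assumptions):

```typescript
// 'ee' and 'msaas' editions both unlock enterprise features.
const ENTERPRISE_EDITIONS = ['ee', 'msaas'];

function isEnterprise(edition: string): boolean {
  return ENTERPRISE_EDITIONS.includes(edition);
}
```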
Shekar Siri
605fa96a34
feat(auth): implement withCaptcha HOC for consistent reCAPTCHA (#3175)
* refactor(searchStore): reformat filterMap function parameters (#3166)

- Reformat the parameters of the filterMap function for better readability.
- Comment out the fetchSessions call in clearSearch method to avoid unnecessary session fetch.

* Increment frontend chart version (#3167)

Co-authored-by: GitHub Action <action@github.com>

* refactor(chalice): cleaned code
fix(chalice): fixed session-search-pg sortKey issue
fix(chalice): fixed CH-query-formatter to handle special chars
fix(chalice): fixed /ids response

* feat(auth): implement withCaptcha HOC for consistent reCAPTCHA

This commit refactors the reCAPTCHA implementation across the application
by introducing a Higher Order Component (withCaptcha) that encapsulates
captcha verification logic. The changes:

- Create a reusable withCaptcha HOC in withRecaptcha.tsx
- Refactor Login, ResetPasswordRequest, and CreatePassword components
- Extract SSOLogin into a separate component
- Improve error handling and user feedback
- Standardize loading and verification states across forms
- Make captcha implementation more maintainable and consistent

---------

Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com>
Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: Taha Yassine Kraiem <tahayk2@gmail.com>
2025-03-19 11:37:50 +01:00
Andrey Babushkin
2cb33d7894
changhe sort events logic (#3174) 2025-03-18 18:27:48 +01:00
nick-delirium
15d427418d
tracker: fix autogen version 2025-03-18 16:37:09 +01:00
nick-delirium
ed3e553726
tracker: assist 11.0.1 changelog 2025-03-18 16:36:10 +01:00
nick-delirium
7eace68de6
ui: add loading state for LiveSessionReloadButton.tsx 2025-03-18 15:30:24 +01:00
Taha Yassine Kraiem
8009882cef refactor(chalice): cleaned code
fix(chalice): fixed session-search-pg sortKey issue
fix(chalice): fixed CH-query-formatter to handle special chars
fix(chalice): fixed /ids response

(cherry picked from commit b505645782)
2025-03-18 13:52:56 +01:00
Andrey Babushkin
7365d8639c
updated widget link (#3158)
* updated widget link

* fix calls

* updated widget url
2025-03-18 11:07:09 +01:00
nick-delirium
4c967d4bc1
ui: update tracker import examples 2025-03-17 13:42:34 +01:00
Alexander
3fdf799bd7 feat(http): unsupported tracker error with projectID in logs 2025-03-17 13:32:00 +01:00
nick-delirium
9aca716e6b
tracker: 16.0.2 fix str dictionary keys 2025-03-17 11:25:54 +01:00
Shekar Siri
cf9ecdc9a4 refactor(searchStore): reformat filterMap function parameters
- Reformat the parameters of the filterMap function for better readability.
- Comment out the fetchSessions call in clearSearch method to avoid unnecessary session fetch.
2025-03-14 19:47:42 +01:00
335 changed files with 13779 additions and 6560 deletions


@@ -47,6 +47,7 @@ runs:
   "JWT_SECRET:.global.jwtSecret"
   "JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
   "JWT_SPOT_SECRET:.global.jwtSpotSecret"
+  "JWT_SECRET:.global.tokenSecret"
   "LICENSE_KEY:.global.enterpriseEditionLicense"
   "MINIO_ACCESS_KEY:.global.s3.accessKey"
   "MINIO_SECRET_KEY:.global.s3.secretKey"

.github/workflows/assist-server-ee.yaml (new file)

@@ -0,0 +1,122 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
        required: false
        default: "false"
  push:
    branches:
      - dev
    paths:
      - "ee/assist-server/**"
name: Build and Deploy Assist-Server EE
jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2
      - uses: ./.github/composite-actions/update-keys
        with:
          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
          assist_key: ${{ secrets.ASSIST_KEY }}
          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
          license_key: ${{ secrets.EE_LICENSE_KEY }}
          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
          pg_password: ${{ secrets.EE_PG_PASSWORD }}
          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
        name: Update Keys
      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext
      - name: Building and Pushing Assist-Server image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd assist-server
          PUSH_IMAGE=0 bash -x ./build.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist-server")
            for image in ${images[*]};do
              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist-server")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
          echo > /tmp/image_override.yaml
          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done
      - name: Deploy to kubernetes
        run: |
          pwd
          cd scripts/helmcharts/
          # Update changed image tag
          sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
          cat /tmp/image_override.yaml
          # Deploy command
          mkdir -p /tmp/charts
          mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
          rm -rf openreplay/charts/*
          mv /tmp/charts/* openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

.github/workflows/patch-build-old.yaml (new file)

@@ -0,0 +1,185 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma separated names of services to build(in small letters).'
        required: true
        default: 'chalice,frontend'
      tag:
        description: 'Tag to build patches from.'
        required: true
        type: string
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
jobs:
  deploy:
    name: Build Patch from old tag
    runs-on: ubuntu-latest
    env:
      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 4
          ref: ${{ github.event.inputs.tag }}
      - name: Set Remote with GITHUB_TOKEN
        run: |
          git config --unset http.https://github.com/.extraheader
          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
      - name: Create backup tag with timestamp
        run: |
          set -e # Exit immediately if a command exits with a non-zero status
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
          echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
          echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
          git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
          git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
          echo "Created backup tag: $BACKUP_TAG"
          # Get the oldest commit date from the last 3 commits in raw format
          OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
          echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
          # Add 1 second to the timestamp
          NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
          echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
      - name: Setup yq
        uses: mikefarah/yq@master
      # Configure AWS credentials for the first registry
      - name: Configure AWS credentials for RELEASE_ARM_REGISTRY
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
          aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
          aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
      - name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
        id: login-ecr-arm
        run: |
          aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
          aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
      - uses: depot/setup-action@v1
      - name: Get HEAD Commit ID
        run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Define Branch Name
        run: echo "BRANCH_NAME=patch/main/${HEAD_COMMIT_ID}" >> $GITHUB_ENV
      - name: Build
        id: build-image
        env:
          DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
          DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
          MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
          MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
          MSAAS_REPO_FOLDER: /tmp/msaas
        run: |
          set -exo pipefail
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git checkout -b $BRANCH_NAME
          working_dir=$(pwd)
          function image_version(){
            local service=$1
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
            current_version=$(yq eval '.AppVersion' $chart_path)
            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
            echo $new_version
            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
          }
          function clone_msaas() {
            [ -d $MSAAS_REPO_FOLDER ] || {
              git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
              cd $MSAAS_REPO_FOLDER
              cd openreplay && git fetch origin && git checkout $INPUT_TAG
              git log -1
              cd $MSAAS_REPO_FOLDER
              bash git-init.sh
              git checkout
            }
          }
          function build_managed() {
            local service=$1
            local version=$2
            echo building managed
            clone_msaas
            if [[ $service == 'chalice' ]]; then
              cd $MSAAS_REPO_FOLDER/openreplay/api
            else
              cd $MSAAS_REPO_FOLDER/openreplay/$service
            fi
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
          }
          # Checking for backend images
          ls backend/cmd >> /tmp/backend.txt
          echo Services: "${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          BUILD_SCRIPT_NAME="build.sh"
          # Build FOSS
          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            version=$(image_version $SERVICE)
            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            else
              build_managed $SERVICE $version
            fi
            cd $working_dir
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
            yq eval ".AppVersion = \"$version\"" -i $chart_path
            git add $chart_path
            git commit -m "Increment $SERVICE chart version"
          done
      - name: Change commit timestamp
        run: |
          # Convert the timestamp to a date format git can understand
          NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
          echo "Setting commit date to: $NEW_DATE"
          # Amend the commit with the new date
          GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
          # Verify the change
          git log -1 --pretty=format:"Commit now dated: %cD"
          # git tag and push
          git tag $INPUT_TAG -f
          git push origin $INPUT_TAG -f
      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true


@@ -22,22 +22,14 @@ jobs:
       - name: Cache tracker modules
         uses: actions/cache@v3
         with:
-          path: tracker/tracker/node_modules
-          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
-          restore-keys: |
-            test_tracker_build{{ runner.OS }}-build-
-            test_tracker_build{{ runner.OS }}-
-      - name: Cache tracker-assist modules
-        uses: actions/cache@v3
-        with:
-          path: tracker/tracker-assist/node_modules
-          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
+          path: tracker/node_modules
+          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lock') }}
           restore-keys: |
             test_tracker_build{{ runner.OS }}-build-
             test_tracker_build{{ runner.OS }}-
       - name: Setup Testing packages
         run: |
-          cd tracker/tracker
+          cd tracker
           bun install
       - name: Jest tests
         run: |
@@ -47,10 +39,6 @@ jobs:
         run: |
           cd tracker/tracker
           bun run build
-      - name: (TA) Setup Testing packages
-        run: |
-          cd tracker/tracker-assist
-          bun install
       - name: (TA) Jest tests
         run: |
           cd tracker/tracker-assist

.gitignore

@@ -7,3 +7,4 @@ node_modules
 **/*.envrc
 .idea
 *.mob*
+install-state.gz


@@ -6,16 +6,15 @@ name = "pypi"
 [packages]
 urllib3 = "==2.3.0"
 requests = "==2.32.3"
-boto3 = "==1.36.12"
+boto3 = "==1.37.21"
 pyjwt = "==2.10.1"
 psycopg2-binary = "==2.9.10"
-psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
-clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
+psycopg = {extras = ["pool", "binary"], version = "==3.2.6"}
 clickhouse-connect = "==0.8.15"
-elasticsearch = "==8.17.1"
+elasticsearch = "==8.17.2"
 jira = "==3.8.0"
-cachetools = "==5.5.1"
+cachetools = "==5.5.2"
-fastapi = "==0.115.8"
+fastapi = "==0.115.12"
 uvicorn = {extras = ["standard"], version = "==0.34.0"}
 python-decouple = "==3.8"
 pydantic = {extras = ["email"], version = "==2.10.6"}


@@ -16,7 +16,7 @@ from chalicelib.utils import helper
 from chalicelib.utils import pg_client, ch_client
 from crons import core_crons, core_dynamic_crons
 from routers import core, core_dynamic
-from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
+from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_analytics
 loglevel = config("LOGLEVEL", default=logging.WARNING)
 print(f">Loglevel set to: {loglevel}")
@@ -129,6 +129,6 @@ app.include_router(spot.public_app)
 app.include_router(spot.app)
 app.include_router(spot.app_apikey)
-app.include_router(product_anaytics.public_app)
-app.include_router(product_anaytics.app)
-app.include_router(product_anaytics.app_apikey)
+app.include_router(product_analytics.public_app, prefix="/pa")
+app.include_router(product_analytics.app, prefix="/pa")
+app.include_router(product_analytics.app_apikey, prefix="/pa")


@@ -241,3 +241,25 @@ def get_colname_by_key(project_id, key):
         return None
     return index_to_colname(meta_keys[key])
+
+
+def get_for_filters(project_id):
+    with pg_client.PostgresClient() as cur:
+        query = cur.mogrify(f"""SELECT {",".join(column_names())}
+                                FROM public.projects
+                                WHERE project_id = %(project_id)s
+                                  AND deleted_at ISNULL
+                                LIMIT 1;""", {"project_id": project_id})
+        cur.execute(query=query)
+        metas = cur.fetchone()
+    results = []
+    if metas is not None:
+        for i, k in enumerate(metas.keys()):
+            if metas[k] is not None:
+                results.append({"id": f"meta_{i}",
+                                "name": k,
+                                "displayName": metas[k],
+                                "possibleTypes": ["String"],
+                                "autoCaptured": False,
+                                "icon": None})
+    return {"total": len(results), "list": results}


@@ -6,7 +6,7 @@ from chalicelib.utils import helper
 from chalicelib.utils import sql_helper as sh
-def filter_stages(stages: List[schemas.SessionSearchEventSchema2]):
+def filter_stages(stages: List[schemas.SessionSearchEventSchema]):
     ALLOW_TYPES = [schemas.EventType.CLICK, schemas.EventType.INPUT,
                    schemas.EventType.LOCATION, schemas.EventType.CUSTOM,
                    schemas.EventType.CLICK_MOBILE, schemas.EventType.INPUT_MOBILE,
@@ -15,10 +15,10 @@ def filter_stages(stages: List[schemas.SessionSearchEventSchema2]):
 def __parse_events(f_events: List[dict]):
-    return [schemas.SessionSearchEventSchema2.parse_obj(e) for e in f_events]
+    return [schemas.SessionSearchEventSchema.parse_obj(e) for e in f_events]
-def __fix_stages(f_events: List[schemas.SessionSearchEventSchema2]):
+def __fix_stages(f_events: List[schemas.SessionSearchEventSchema]):
     if f_events is None:
         return
     events = []


@@ -160,7 +160,7 @@ s.start_ts,
 s.duration"""
-def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
+def __get_1_url(location_condition: schemas.SessionSearchEventSchema | None, session_id: str, project_id: int,
                 start_time: int,
                 end_time: int) -> str | None:
     full_args = {
@@ -240,13 +240,13 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
                                              value=[schemas.PlatformType.DESKTOP],
                                              operator=schemas.SearchEventOperator.IS))
     if not location_condition:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.LOCATION,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))
     if no_click:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.CLICK,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))
     data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                           value=[0],


@ -24,8 +24,9 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
"main_events.`$event_name` = 'CLICK'", "main_events.`$event_name` = 'CLICK'",
"isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))" "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
] ]
if data.operator == schemas.SearchEventOperator.PATTERN:
if data.operator == schemas.SearchEventOperator.IS: constraints.append("match(main_events.`$properties`.url_path.:String, %(url)s)")
elif data.operator == schemas.SearchEventOperator.IS:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s") constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
else: else:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s") constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
@ -179,7 +180,7 @@ toUnixTimestamp(s.datetime)*1000 AS start_ts,
s.duration AS duration""" s.duration AS duration"""
def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int, def __get_1_url(location_condition: schemas.SessionSearchEventSchema | None, session_id: str, project_id: int,
start_time: int, start_time: int,
end_time: int) -> str | None: end_time: int) -> str | None:
full_args = { full_args = {
@ -262,13 +263,13 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
value=[schemas.PlatformType.DESKTOP], value=[schemas.PlatformType.DESKTOP],
operator=schemas.SearchEventOperator.IS)) operator=schemas.SearchEventOperator.IS))
if not location_condition: if not location_condition:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION, data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.LOCATION,
value=[], value=[],
operator=schemas.SearchEventOperator.IS_ANY)) operator=schemas.SearchEventOperator.IS_ANY))
if no_click: if no_click:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK, data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.CLICK,
value=[], value=[],
operator=schemas.SearchEventOperator.IS_ANY)) operator=schemas.SearchEventOperator.IS_ANY))
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT, data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
value=[0], value=[0],


@ -241,7 +241,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
:return: :return:
""" """
stages: List[schemas.SessionSearchEventSchema2] = filter_d.events stages: List[schemas.SessionSearchEventSchema] = filter_d.events
filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
stage_constraints = ["main.timestamp <= %(endTimestamp)s"] stage_constraints = ["main.timestamp <= %(endTimestamp)s"]


@ -15,7 +15,7 @@ logger = logging.getLogger(__name__)
def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext, def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext,
metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]: metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]:
stages: List[schemas.SessionSearchEventSchema2] = filter_d.events stages: List[schemas.SessionSearchEventSchema] = filter_d.events
filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
platform = project.platform platform = project.platform
constraints = ["e.project_id = %(project_id)s", constraints = ["e.project_id = %(project_id)s",


@ -85,6 +85,9 @@ def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22) # compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
# if start-point is selected, the selected event is ranked n°1 # if start-point is selected, the selected event is ranked n°1
def path_analysis(project_id: int, data: schemas.CardPathAnalysis): def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
if not data.hide_excess:
data.hide_excess = True
data.rows = 50
sub_events = [] sub_events = []
start_points_conditions = [] start_points_conditions = []
step_0_conditions = [] step_0_conditions = []


@ -1,14 +0,0 @@
from chalicelib.utils.ch_client import ClickHouseClient
def search_events(project_id: int, data: dict):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT *
FROM taha.events
WHERE project_id=%(project_id)s
ORDER BY created_at;""",
params={"project_id": project_id})
x = ch_client.execute(r)
return x


@ -0,0 +1,139 @@
import logging
import schemas
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
from chalicelib.utils.ch_client import ClickHouseClient
from chalicelib.utils.exp_ch_helper import get_sub_condition
logger = logging.getLogger(__name__)
def get_events(project_id: int, page: schemas.PaginatedSchema):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT DISTINCT ON(event_name,auto_captured)
COUNT(1) OVER () AS total,
event_name AS name, display_name, description,
auto_captured
FROM product_analytics.all_events
WHERE project_id=%(project_id)s
ORDER BY auto_captured,display_name
LIMIT %(limit)s OFFSET %(offset)s;""",
parameters={"project_id": project_id, "limit": page.limit, "offset": (page.page - 1) * page.limit})
rows = ch_client.execute(r)
if len(rows) == 0:
return {"total": 0, "list": []}
total = rows[0]["total"]
for i, row in enumerate(rows):
row["id"] = f"event_{i}"
row["icon"] = None
row["possibleTypes"] = ["string"]
row.pop("total")
return {"total": total, "list": helper.list_to_camel_case(rows)}
def search_events(project_id: int, data: schemas.EventsSearchPayloadSchema):
with ClickHouseClient() as ch_client:
full_args = {"project_id": project_id, "startDate": data.startTimestamp, "endDate": data.endTimestamp,
"projectId": project_id, "limit": data.limit, "offset": (data.page - 1) * data.limit}
constraints = ["project_id = %(projectId)s",
"created_at >= toDateTime(%(startDate)s/1000)",
"created_at <= toDateTime(%(endDate)s/1000)"]
ev_constraints = []
for i, f in enumerate(data.filters):
if not f.is_event:
f.value = helper.values_for_operator(value=f.value, op=f.operator)
f_k = f"f_value{i}"
full_args = {**full_args, f_k: sh.single_value(f.value), **sh.multi_values(f.value, value_key=f_k)}
is_any = sh.isAny_opreator(f.operator)
is_undefined = sh.isUndefined_operator(f.operator)
full_args = {**full_args, f_k: sh.single_value(f.value), **sh.multi_values(f.value, value_key=f_k)}
if f.is_predefined:
column = f.name
else:
column = f"properties.{f.name}"
if is_any:
condition = f"notEmpty{column})"
elif is_undefined:
condition = f"empty({column})"
else:
condition = sh.multi_conditions(
get_sub_condition(col_name=column, val_name=f_k, operator=f.operator),
values=f.value, value_key=f_k)
constraints.append(condition)
else:
e_k = f"e_value{i}"
full_args = {**full_args, e_k: f.name}
condition = f"`$event_name` = %({e_k})s"
sub_conditions = []
for j, ef in enumerate(f.properties.filters):
p_k = f"e_{i}_p_{j}"
full_args = {**full_args, **sh.multi_values(ef.value, value_key=p_k)}
if ef.is_predefined:
sub_condition = get_sub_condition(col_name=ef.name, val_name=p_k, operator=ef.operator)
else:
sub_condition = get_sub_condition(col_name=f"properties.{ef.name}",
val_name=p_k, operator=ef.operator)
sub_conditions.append(sh.multi_conditions(sub_condition, ef.value, value_key=p_k))
if len(sub_conditions) > 0:
condition += " AND (" + (" " + f.properties.operator + " ").join(sub_conditions) + ")"
ev_constraints.append(condition)
constraints.append("(" + " OR ".join(ev_constraints) + ")")
query = ch_client.format(
f"""SELECT COUNT(1) OVER () AS total,
event_id,
`$event_name`,
created_at,
`distinct_id`,
`$browser`,
`$import`,
`$os`,
`$country`,
`$state`,
`$city`,
`$screen_height`,
`$screen_width`,
`$source`,
`$user_id`,
`$device`
FROM product_analytics.events
WHERE {" AND ".join(constraints)}
ORDER BY created_at
LIMIT %(limit)s OFFSET %(offset)s;""",
parameters=full_args)
rows = ch_client.execute(query)
if len(rows) == 0:
return {"total": 0, "rows": [], "src": 2}
total = rows[0]["total"]
for r in rows:
r.pop("total")
return {"total": total, "rows": rows, "src": 2}
def get_lexicon(project_id: int, page: schemas.PaginatedSchema):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT COUNT(1) OVER () AS total,
all_events.event_name AS name,
*
FROM product_analytics.all_events
WHERE project_id=%(project_id)s
ORDER BY display_name
LIMIT %(limit)s OFFSET %(offset)s;""",
parameters={"project_id": project_id, "limit": page.limit, "offset": (page.page - 1) * page.limit})
rows = ch_client.execute(r)
if len(rows) == 0:
return {"total": 0, "list": []}
total = rows[0]["total"]
for i, row in enumerate(rows):
row["id"] = f"event_{i}"
row["icon"] = None
row["possibleTypes"] = ["string"]
row.pop("total")
return {"total": total, "list": helper.list_to_camel_case(rows)}


@ -0,0 +1,83 @@
from chalicelib.utils import helper, exp_ch_helper
from chalicelib.utils.ch_client import ClickHouseClient
import schemas
def get_all_properties(project_id: int, page: schemas.PaginatedSchema):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT COUNT(1) OVER () AS total,
property_name AS name, display_name,
array_agg(DISTINCT event_properties.value_type) AS possible_types
FROM product_analytics.all_properties
LEFT JOIN product_analytics.event_properties USING (project_id, property_name)
WHERE all_properties.project_id=%(project_id)s
GROUP BY property_name,display_name
ORDER BY display_name
LIMIT %(limit)s OFFSET %(offset)s;""",
parameters={"project_id": project_id,
"limit": page.limit,
"offset": (page.page - 1) * page.limit})
properties = ch_client.execute(r)
if len(properties) == 0:
return {"total": 0, "list": []}
total = properties[0]["total"]
properties = helper.list_to_camel_case(properties)
for i, p in enumerate(properties):
p["id"] = f"prop_{i}"
p["icon"] = None
p["possibleTypes"] = exp_ch_helper.simplify_clickhouse_types(p["possibleTypes"])
p.pop("total")
return {"total": total, "list": properties}
def get_event_properties(project_id: int, event_name):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT all_properties.property_name,
all_properties.display_name
FROM product_analytics.event_properties
INNER JOIN product_analytics.all_properties USING (property_name)
WHERE event_properties.project_id=%(project_id)s
AND all_properties.project_id=%(project_id)s
AND event_properties.event_name=%(event_name)s
ORDER BY created_at;""",
parameters={"project_id": project_id, "event_name": event_name})
properties = ch_client.execute(r)
return helper.list_to_camel_case(properties)
def get_lexicon(project_id: int, page: schemas.PaginatedSchema):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT COUNT(1) OVER () AS total,
all_properties.property_name AS name,
all_properties.*,
possible_types.values AS possible_types,
possible_values.values AS sample_values
FROM product_analytics.all_properties
LEFT JOIN (SELECT project_id, property_name, array_agg(DISTINCT value_type) AS values
FROM product_analytics.event_properties
WHERE project_id=%(project_id)s
GROUP BY 1, 2) AS possible_types
USING (project_id, property_name)
LEFT JOIN (SELECT project_id, property_name, array_agg(DISTINCT value) AS values
FROM product_analytics.property_values_samples
WHERE project_id=%(project_id)s
GROUP BY 1, 2) AS possible_values USING (project_id, property_name)
WHERE project_id=%(project_id)s
ORDER BY display_name
LIMIT %(limit)s OFFSET %(offset)s;""",
parameters={"project_id": project_id,
"limit": page.limit,
"offset": (page.page - 1) * page.limit})
properties = ch_client.execute(r)
if len(properties) == 0:
return {"total": 0, "list": []}
total = properties[0]["total"]
for i, p in enumerate(properties):
p["id"] = f"prop_{i}"
p["icon"] = None
p.pop("total")
return {"total": total, "list": helper.list_to_camel_case(properties)}


@ -6,8 +6,18 @@ logger = logging.getLogger(__name__)
from . import sessions_pg from . import sessions_pg
from . import sessions_pg as sessions_legacy from . import sessions_pg as sessions_legacy
from . import sessions_ch from . import sessions_ch
from . import sessions_search_pg
from . import sessions_search_pg as sessions_search_legacy
if config("EXP_METRICS", cast=bool, default=False): if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental sessions search")
from . import sessions_ch as sessions from . import sessions_ch as sessions
from . import sessions_search_ch as sessions_search
else: else:
from . import sessions_pg as sessions from . import sessions_pg as sessions
from . import sessions_search_pg as sessions_search
# if config("EXP_METRICS", cast=bool, default=False):
# from . import sessions_ch as sessions
# else:
# from . import sessions_pg as sessions


@ -6,6 +6,7 @@ from chalicelib.core import events, metadata
from . import performance_event, sessions_legacy from . import performance_event, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
from chalicelib.utils import sql_helper as sh from chalicelib.utils import sql_helper as sh
from chalicelib.utils.exp_ch_helper import get_sub_condition
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -48,8 +49,8 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
query = f"""SELECT gs.generate_series AS timestamp, query = f"""SELECT gs.generate_series AS timestamp,
COALESCE(COUNT(DISTINCT processed_sessions.user_id),0) AS count COALESCE(COUNT(DISTINCT processed_sessions.user_id),0) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
LEFT JOIN (SELECT multiIf(s.user_id IS NOT NULL AND s.user_id != '', s.user_id, LEFT JOIN (SELECT multiIf(isNotNull(s.user_id) AND notEmpty(s.user_id), s.user_id,
s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != '', isNotNull(s.user_anonymous_id) AND notEmpty(s.user_anonymous_id),
s.user_anonymous_id, toString(s.user_uuid)) AS user_id, s.user_anonymous_id, toString(s.user_uuid)) AS user_id,
s.datetime AS datetime s.datetime AS datetime
{query_part}) AS processed_sessions ON(TRUE) {query_part}) AS processed_sessions ON(TRUE)
@ -148,7 +149,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
for e in data.events: for e in data.events:
if e.type == schemas.EventType.LOCATION: if e.type == schemas.EventType.LOCATION:
if e.operator not in extra_conditions: if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({ extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
"type": e.type, "type": e.type,
"isEvent": True, "isEvent": True,
"value": [], "value": [],
@ -173,7 +174,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
for e in data.events: for e in data.events:
if e.type == schemas.EventType.REQUEST_DETAILS: if e.type == schemas.EventType.REQUEST_DETAILS:
if e.operator not in extra_conditions: if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({ extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
"type": e.type, "type": e.type,
"isEvent": True, "isEvent": True,
"value": [], "value": [],
@ -253,7 +254,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
FROM (SELECT s.user_id AS user_id {extra_col} FROM (SELECT s.user_id AS user_id {extra_col}
{query_part} {query_part}
WHERE isNotNull(user_id) WHERE isNotNull(user_id)
AND user_id != '') AS filtred_sessions AND notEmpty(user_id)) AS filtred_sessions
{extra_where} {extra_where}
GROUP BY {main_col} GROUP BY {main_col}
ORDER BY total DESC ORDER BY total DESC
@ -277,7 +278,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
return sessions return sessions
def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2): def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS, return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
schemas.EventType.GRAPHQL] \ schemas.EventType.GRAPHQL] \
or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE, or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,
@ -330,7 +331,11 @@ def json_condition(table_alias, json_column, json_key, op, values, value_key, ch
extract_func = "JSONExtractFloat" if numeric_type == "float" else "JSONExtractInt" extract_func = "JSONExtractFloat" if numeric_type == "float" else "JSONExtractInt"
condition = f"{extract_func}(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s" condition = f"{extract_func}(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s"
else: else:
condition = f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s" # condition = f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s"
condition = get_sub_condition(
col_name=f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}')",
val_name=value_key, operator=op
)
conditions.append(sh.multi_conditions(condition, values, value_key=value_key)) conditions.append(sh.multi_conditions(condition, values, value_key=value_key))
@ -660,7 +665,8 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
event.value = helper.values_for_operator(value=event.value, op=event.operator) event.value = helper.values_for_operator(value=event.value, op=event.operator)
full_args = {**full_args, full_args = {**full_args,
**sh.multi_values(event.value, value_key=e_k), **sh.multi_values(event.value, value_key=e_k),
**sh.multi_values(event.source, value_key=s_k)} **sh.multi_values(event.source, value_key=s_k),
e_k: event.value[0] if len(event.value) > 0 else event.value}
if event_type == events.EventType.CLICK.ui_type: if event_type == events.EventType.CLICK.ui_type:
event_from = event_from % f"{MAIN_EVENTS_TABLE} AS main " event_from = event_from % f"{MAIN_EVENTS_TABLE} AS main "
@ -671,24 +677,44 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
events_conditions.append({"type": event_where[-1]}) events_conditions.append({"type": event_where[-1]})
if not is_any: if not is_any:
if schemas.ClickEventExtraOperator.has_value(event.operator): if schemas.ClickEventExtraOperator.has_value(event.operator):
event_where.append(json_condition( # event_where.append(json_condition(
"main", # "main",
"$properties", # "$properties",
"selector", op, event.value, e_k) # "selector", op, event.value, e_k)
# )
event_where.append(
sh.multi_conditions(
get_sub_condition(col_name=f"main.`$properties`.selector",
val_name=e_k, operator=event.operator),
event.value, value_key=e_k)
) )
events_conditions[-1]["condition"] = event_where[-1] events_conditions[-1]["condition"] = event_where[-1]
else: else:
if is_not: if is_not:
event_where.append(json_condition( # event_where.append(json_condition(
"sub", "$properties", _column, op, event.value, e_k # "sub", "$properties", _column, op, event.value, e_k
)) # ))
event_where.append(
sh.multi_conditions(
get_sub_condition(col_name=f"sub.`$properties`.{_column}",
val_name=e_k, operator=event.operator),
event.value, value_key=e_k)
)
events_conditions_not.append( events_conditions_not.append(
{ {
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"}) "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"
}
)
events_conditions_not[-1]["condition"] = event_where[-1] events_conditions_not[-1]["condition"] = event_where[-1]
else: else:
# event_where.append(
# json_condition("main", "$properties", _column, op, event.value, e_k)
# )
event_where.append( event_where.append(
json_condition("main", "$properties", _column, op, event.value, e_k) sh.multi_conditions(
get_sub_condition(col_name=f"main.`$properties`.{_column}",
val_name=e_k, operator=event.operator),
event.value, value_key=e_k)
) )
events_conditions[-1]["condition"] = event_where[-1] events_conditions[-1]["condition"] = event_where[-1]
else: else:
@ -870,12 +896,15 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
events_conditions[-1]["condition"] = [] events_conditions[-1]["condition"] = []
if not is_any and event.value not in [None, "*", ""]: if not is_any and event.value not in [None, "*", ""]:
event_where.append( event_where.append(
sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)", sh.multi_conditions(
event.value, value_key=e_k)) f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
event.value, value_key=e_k))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}" events_extra_join += f" AND {event_where[-1]}"
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]: if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k)) event_where.append(
sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source,
value_key=s_k))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}" events_extra_join += f" AND {event_where[-1]}"
@ -1191,8 +1220,35 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
else: else:
logging.warning(f"undefined GRAPHQL filter: {f.type}") logging.warning(f"undefined GRAPHQL filter: {f.type}")
events_conditions[-1]["condition"] = " AND ".join(events_conditions[-1]["condition"]) events_conditions[-1]["condition"] = " AND ".join(events_conditions[-1]["condition"])
elif event_type == schemas.EventType.EVENT:
event_from = event_from % f"{MAIN_EVENTS_TABLE} AS main "
_column = events.EventType.CLICK.column
event_where.append(f"main.`$event_name`=%({e_k})s AND main.session_id>0")
events_conditions.append({"type": event_where[-1], "condition": ""})
else: else:
continue continue
if event.properties is not None and len(event.properties.filters) > 0:
sub_conditions = []
for l, property in enumerate(event.properties.filters):
a_k = f"{e_k}_att_{l}"
full_args = {**full_args,
**sh.multi_values(property.value, value_key=a_k)}
if property.is_predefined:
condition = get_sub_condition(col_name=f"main.{property.name}",
val_name=a_k, operator=property.operator)
else:
condition = get_sub_condition(col_name=f"main.properties.{property.name}",
val_name=a_k, operator=property.operator)
event_where.append(
sh.multi_conditions(condition, property.value, value_key=a_k)
)
sub_conditions.append(event_where[-1])
if len(sub_conditions) > 0:
sub_conditions = (" " + event.properties.operator + " ").join(sub_conditions)
events_conditions[-1]["condition"] += " AND " if len(events_conditions[-1]["condition"]) > 0 else ""
events_conditions[-1]["condition"] += "(" + sub_conditions + ")"
if event_index == 0 or or_events: if event_index == 0 or or_events:
event_where += ss_constraints event_where += ss_constraints
if is_not: if is_not:


@ -1,6 +1,5 @@
import ast import ast
import logging import logging
from typing import List, Union
import schemas import schemas
from chalicelib.core import events, metadata, projects from chalicelib.core import events, metadata, projects
@ -219,7 +218,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
} }
def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2): def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS, return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
schemas.EventType.GRAPHQL] \ schemas.EventType.GRAPHQL] \
or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE, or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,


@ -143,7 +143,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
for e in data.events: for e in data.events:
if e.type == schemas.EventType.LOCATION: if e.type == schemas.EventType.LOCATION:
if e.operator not in extra_conditions: if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({ extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
"type": e.type, "type": e.type,
"isEvent": True, "isEvent": True,
"value": [], "value": [],
@ -160,7 +160,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
for e in data.events: for e in data.events:
if e.type == schemas.EventType.REQUEST_DETAILS: if e.type == schemas.EventType.REQUEST_DETAILS:
if e.operator not in extra_conditions: if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({ extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
"type": e.type, "type": e.type,
"isEvent": True, "isEvent": True,
"value": [], "value": [],
@ -273,7 +273,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
return sessions return sessions
def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2): def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS, return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
schemas.EventType.GRAPHQL] \ schemas.EventType.GRAPHQL] \
or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE, or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,


@ -141,7 +141,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
) AS users_sessions;""", ) AS users_sessions;""",
full_args) full_args)
elif ids_only: elif ids_only:
main_query = cur.format(query=f"""SELECT DISTINCT ON(s.session_id) s.session_id main_query = cur.format(query=f"""SELECT DISTINCT ON(s.session_id) s.session_id AS session_id
{query_part} {query_part}
ORDER BY s.session_id desc ORDER BY s.session_id desc
LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""", LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
@ -175,11 +175,11 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
ORDER BY sort_key {data.order} ORDER BY sort_key {data.order}
LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s) AS sorted_sessions;""", LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s) AS sorted_sessions;""",
parameters=full_args) parameters=full_args)
logging.debug("--------------------")
logging.debug(main_query)
logging.debug("--------------------")
try: try:
logging.debug("--------------------")
sessions_list = cur.execute(main_query) sessions_list = cur.execute(main_query)
logging.debug("--------------------")
except Exception as err: except Exception as err:
logging.warning("--------- SESSIONS-CH SEARCH QUERY EXCEPTION -----------") logging.warning("--------- SESSIONS-CH SEARCH QUERY EXCEPTION -----------")
logging.warning(main_query) logging.warning(main_query)
@ -262,7 +262,7 @@ def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
FROM public.user_favorite_sessions FROM public.user_favorite_sessions
WHERE user_favorite_sessions.user_id = %(userId)s WHERE user_favorite_sessions.user_id = %(userId)s
) AS favorite_sessions USING (session_id) ) AS favorite_sessions USING (session_id)
WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s WHERE s.project_id = %(id)s AND isNotNull(s.duration) AND s.{col_name} = %(value)s
) AS full_sessions ) AS full_sessions
ORDER BY favorite DESC, issue_score DESC ORDER BY favorite DESC, issue_score DESC
LIMIT 10 LIMIT 10


@ -122,7 +122,10 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
sort = 'session_id' sort = 'session_id'
if data.sort is not None and data.sort != "session_id": if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort) # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
sort = helper.key_to_snake_case(data.sort) if data.sort == 'datetime':
sort = 'start_ts'
else:
sort = helper.key_to_snake_case(data.sort)
meta_keys = metadata.get(project_id=project.project_id) meta_keys = metadata.get(project_id=project.project_id)
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count, main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,


@ -11,9 +11,3 @@ if smtp.has_smtp():
logger.info("valid SMTP configuration found") logger.info("valid SMTP configuration found")
else: else:
logger.info("no SMTP configuration found or SMTP validation failed") logger.info("no SMTP configuration found or SMTP validation failed")
if config("EXP_CH_DRIVER", cast=bool, default=True):
logging.info(">>> Using new CH driver")
from . import ch_client_exp as ch_client
else:
from . import ch_client


@ -1,73 +1,185 @@
import logging import logging
import threading
import time
from functools import wraps
from queue import Queue, Empty
import clickhouse_driver import clickhouse_connect
from clickhouse_connect.driver.query import QueryContext
from decouple import config from decouple import config
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
_CH_CONFIG = {"host": config("ch_host"),
"user": config("ch_user", default="default"),
"password": config("ch_password", default=""),
"port": config("ch_port_http", cast=int),
"client_name": config("APP_NAME", default="PY")}
CH_CONFIG = dict(_CH_CONFIG)
settings = {} settings = {}
if config('ch_timeout', cast=int, default=-1) > 0: if config('ch_timeout', cast=int, default=-1) > 0:
logger.info(f"CH-max_execution_time set to {config('ch_timeout')}s") logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)} settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}
if config('ch_receive_timeout', cast=int, default=-1) > 0: if config('ch_receive_timeout', cast=int, default=-1) > 0:
logger.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s") logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)} settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}
extra_args = {}
if config("CH_COMPRESSION", cast=bool, default=True):
extra_args["compression"] = "lz4"
def transform_result(self, original_function):
@wraps(original_function)
def wrapper(*args, **kwargs):
if kwargs.get("parameters"):
if config("LOCAL_DEV", cast=bool, default=False):
logger.debug(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters")))
else:
logger.debug(
str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
elif len(args) > 0:
if config("LOCAL_DEV", cast=bool, default=False):
logger.debug(args[0])
else:
logger.debug(str.encode(args[0]))
result = original_function(*args, **kwargs)
if isinstance(result, clickhouse_connect.driver.query.QueryResult):
column_names = result.column_names
result = result.result_rows
result = [dict(zip(column_names, row)) for row in result]
return result
return wrapper
class ClickHouseConnectionPool:
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
self.pool = Queue()
self.lock = threading.Lock()
self.total_connections = 0
# Initialize the pool with min_size connections
for _ in range(self.min_size):
client = clickhouse_connect.get_client(**CH_CONFIG,
database=config("ch_database", default="default"),
settings=settings,
**extra_args)
self.pool.put(client)
self.total_connections += 1
def get_connection(self):
try:
# Try to get a connection without blocking
client = self.pool.get_nowait()
return client
except Empty:
with self.lock:
if self.total_connections < self.max_size:
client = clickhouse_connect.get_client(**CH_CONFIG,
database=config("ch_database", default="default"),
settings=settings,
**extra_args)
self.total_connections += 1
return client
# If max_size reached, wait until a connection is available
client = self.pool.get()
return client
def release_connection(self, client):
self.pool.put(client)
def close_all(self):
with self.lock:
while not self.pool.empty():
client = self.pool.get()
client.close()
self.total_connections = 0
CH_pool: ClickHouseConnectionPool = None
RETRY_MAX = config("CH_RETRY_MAX", cast=int, default=50)
RETRY_INTERVAL = config("CH_RETRY_INTERVAL", cast=int, default=2)
RETRY = 0
def make_pool():
if not config('CH_POOL', cast=bool, default=True):
return
global CH_pool
global RETRY
if CH_pool is not None:
try:
CH_pool.close_all()
except Exception as error:
logger.error("Error while closing all connexions to CH", exc_info=error)
try:
CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
max_size=config("CH_MAXCONN", cast=int, default=8))
if CH_pool is not None:
logger.info("Connection pool created successfully for CH")
except ConnectionError as error:
logger.error("Error while connecting to CH", exc_info=error)
if RETRY < RETRY_MAX:
RETRY += 1
logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
time.sleep(RETRY_INTERVAL)
make_pool()
else:
raise error
class ClickHouseClient: class ClickHouseClient:
__client = None __client = None
def __init__(self, database=None): def __init__(self, database=None):
extra_args = {} if self.__client is None:
if config("CH_COMPRESSION", cast=bool, default=True): if database is not None or not config('CH_POOL', cast=bool, default=True):
extra_args["compression"] = "lz4" self.__client = clickhouse_connect.get_client(**CH_CONFIG,
self.__client = clickhouse_driver.Client(host=config("ch_host"), database=database if database else config("ch_database",
database=database if database else config("ch_database", default="default"),
default="default"), settings=settings,
user=config("ch_user", default="default"), **extra_args)
password=config("ch_password", default=""),
port=config("ch_port", cast=int), else:
settings=settings, self.__client = CH_pool.get_connection()
**extra_args) \
if self.__client is None else self.__client self.__client.execute = transform_result(self, self.__client.query)
self.__client.format = self.format
def __enter__(self): def __enter__(self):
return self
def execute(self, query, parameters=None, **args):
try:
results = self.__client.execute(query=query, params=parameters, with_column_types=True, **args)
keys = tuple(x for x, y in results[1])
return [dict(zip(keys, i)) for i in results[0]]
except Exception as err:
logger.error("--------- CH EXCEPTION -----------", exc_info=err)
logger.error("--------- CH QUERY EXCEPTION -----------")
logger.error(self.format(query=query, parameters=parameters)
.replace('\n', '\\n')
.replace(' ', ' ')
.replace(' ', ' '))
logger.error("--------------------")
raise err
def insert(self, query, params=None, **args):
return self.__client.execute(query=query, params=params, **args)
def client(self):
return self.__client return self.__client
def format(self, query, parameters): def format(self, query, parameters=None):
if parameters is None: if parameters:
return query ctx = QueryContext(query=query, parameters=parameters)
return self.__client.substitute_params(query, parameters, self.__client.connection.context) return ctx.final_query
return query
def __exit__(self, *args): def __exit__(self, *args):
pass if config('CH_POOL', cast=bool, default=True):
CH_pool.release_connection(self.__client)
else:
self.__client.close()
async def init(): async def init():
logger.info(f">CH_POOL:not defined") logger.info(f">use CH_POOL:{config('CH_POOL', default=True)}")
if config('CH_POOL', cast=bool, default=True):
make_pool()
async def terminate(): async def terminate():
pass global CH_pool
if CH_pool is not None:
try:
CH_pool.close_all()
logger.info("Closed all connexions to CH")
except Exception as error:
logger.error("Error while closing all connexions to CH", exc_info=error)


@ -1,177 +0,0 @@
import logging
import threading
import time
from functools import wraps
from queue import Queue, Empty
import clickhouse_connect
from clickhouse_connect.driver.query import QueryContext
from decouple import config
logger = logging.getLogger(__name__)
_CH_CONFIG = {"host": config("ch_host"),
"user": config("ch_user", default="default"),
"password": config("ch_password", default=""),
"port": config("ch_port_http", cast=int),
"client_name": config("APP_NAME", default="PY")}
CH_CONFIG = dict(_CH_CONFIG)
settings = {}
if config('ch_timeout', cast=int, default=-1) > 0:
logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}
if config('ch_receive_timeout', cast=int, default=-1) > 0:
logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}
extra_args = {}
if config("CH_COMPRESSION", cast=bool, default=True):
extra_args["compression"] = "lz4"
def transform_result(self, original_function):
@wraps(original_function)
def wrapper(*args, **kwargs):
logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
result = original_function(*args, **kwargs)
if isinstance(result, clickhouse_connect.driver.query.QueryResult):
column_names = result.column_names
result = result.result_rows
result = [dict(zip(column_names, row)) for row in result]
return result
return wrapper
class ClickHouseConnectionPool:
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
self.pool = Queue()
self.lock = threading.Lock()
self.total_connections = 0
# Initialize the pool with min_size connections
for _ in range(self.min_size):
client = clickhouse_connect.get_client(**CH_CONFIG,
database=config("ch_database", default="default"),
settings=settings,
**extra_args)
self.pool.put(client)
self.total_connections += 1
def get_connection(self):
try:
# Try to get a connection without blocking
client = self.pool.get_nowait()
return client
except Empty:
with self.lock:
if self.total_connections < self.max_size:
client = clickhouse_connect.get_client(**CH_CONFIG,
database=config("ch_database", default="default"),
settings=settings,
**extra_args)
self.total_connections += 1
return client
# If max_size reached, wait until a connection is available
client = self.pool.get()
return client
def release_connection(self, client):
self.pool.put(client)
def close_all(self):
with self.lock:
while not self.pool.empty():
client = self.pool.get()
client.close()
self.total_connections = 0
CH_pool: ClickHouseConnectionPool = None
RETRY_MAX = config("CH_RETRY_MAX", cast=int, default=50)
RETRY_INTERVAL = config("CH_RETRY_INTERVAL", cast=int, default=2)
RETRY = 0
def make_pool():
if not config('CH_POOL', cast=bool, default=True):
return
global CH_pool
global RETRY
if CH_pool is not None:
try:
CH_pool.close_all()
except Exception as error:
logger.error("Error while closing all connexions to CH", exc_info=error)
try:
CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
max_size=config("CH_MAXCONN", cast=int, default=8))
if CH_pool is not None:
logger.info("Connection pool created successfully for CH")
except ConnectionError as error:
logger.error("Error while connecting to CH", exc_info=error)
if RETRY < RETRY_MAX:
RETRY += 1
logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
time.sleep(RETRY_INTERVAL)
make_pool()
else:
raise error
class ClickHouseClient:
__client = None
def __init__(self, database=None):
if self.__client is None:
if database is not None or not config('CH_POOL', cast=bool, default=True):
self.__client = clickhouse_connect.get_client(**CH_CONFIG,
database=database if database else config("ch_database",
default="default"),
settings=settings,
**extra_args)
else:
self.__client = CH_pool.get_connection()
self.__client.execute = transform_result(self, self.__client.query)
self.__client.format = self.format
def __enter__(self):
return self.__client
def format(self, query, *, parameters=None):
if parameters is None:
return query
return query % {
key: f"'{value}'" if isinstance(value, str) else value
for key, value in parameters.items()
}
def __exit__(self, *args):
if config('CH_POOL', cast=bool, default=True):
CH_pool.release_connection(self.__client)
else:
self.__client.close()
async def init():
logger.info(f">use CH_POOL:{config('CH_POOL', default=True)}")
if config('CH_POOL', cast=bool, default=True):
make_pool()
async def terminate():
global CH_pool
if CH_pool is not None:
try:
CH_pool.close_all()
logger.info("Closed all connexions to CH")
except Exception as error:
logger.error("Error while closing all connexions to CH", exc_info=error)


@ -1,7 +1,10 @@
import logging
import re
from typing import Union from typing import Union
import schemas import schemas
import logging from chalicelib.utils import sql_helper as sh
from schemas import SearchEventOperator
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -66,3 +69,94 @@ def get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEvent
if event_type not in defs: if event_type not in defs:
raise Exception(f"unsupported EventType:{event_type}") raise Exception(f"unsupported EventType:{event_type}")
return defs.get(event_type) return defs.get(event_type)
# AI generated
def simplify_clickhouse_type(ch_type: str) -> str:
"""
Simplify a ClickHouse data type name to a broader category like:
int, float, decimal, datetime, string, uuid, enum, array, tuple, map, nested, etc.
"""
# 1) Strip out common wrappers like Nullable(...) or LowCardinality(...)
# Possibly multiple wrappers: e.g. "LowCardinality(Nullable(Int32))"
pattern_wrappers = re.compile(r'(Nullable|LowCardinality)\((.*)\)')
while True:
match = pattern_wrappers.match(ch_type)
if match:
ch_type = match.group(2)
else:
break
# 2) Normalize (lowercase) for easier checks
normalized_type = ch_type.lower()
# 3) Use pattern matching or direct checks for known categories
# (You can adapt this as you see fit for your environment.)
# Integers: Int8, Int16, Int32, Int64, Int128, Int256, UInt8, UInt16, ...
if re.match(r'^(u?int)(8|16|32|64|128|256)$', normalized_type):
return "int"
# Floats: Float32, Float64
if re.match(r'^float(32|64)$', normalized_type):
return "float"
# Decimal: Decimal(P, S)
if normalized_type.startswith("decimal"):
return "decimal"
# Date/DateTime
if normalized_type.startswith("date"):
return "datetime"
if normalized_type.startswith("datetime"):
return "datetime"
# Strings: String, FixedString(N)
if normalized_type.startswith("string"):
return "string"
if normalized_type.startswith("fixedstring"):
return "string"
# UUID
if normalized_type.startswith("uuid"):
return "uuid"
# Enums: Enum8(...) or Enum16(...)
if normalized_type.startswith("enum8") or normalized_type.startswith("enum16"):
return "enum"
# Arrays: Array(T)
if normalized_type.startswith("array"):
return "array"
# Tuples: Tuple(T1, T2, ...)
if normalized_type.startswith("tuple"):
return "tuple"
# Map(K, V)
if normalized_type.startswith("map"):
return "map"
# Nested(...)
if normalized_type.startswith("nested"):
return "nested"
# If we didn't match above, just return the original type in lowercase
return normalized_type
def simplify_clickhouse_types(ch_types: list[str]) -> list[str]:
"""
Takes a list of ClickHouse types and returns a list of simplified types
by calling `simplify_clickhouse_type` on each.
"""
return list(set([simplify_clickhouse_type(t) for t in ch_types]))
def get_sub_condition(col_name: str, val_name: str,
operator: Union[schemas.SearchEventOperator, schemas.MathOperator]):
if operator == SearchEventOperator.PATTERN:
return f"match({col_name}, %({val_name})s)"
op = sh.get_sql_operator(operator)
return f"{col_name} {op} %({val_name})s"


@ -14,6 +14,9 @@ def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventEx
schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE", schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
schemas.SearchEventOperator.STARTS_WITH: "ILIKE", schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
schemas.SearchEventOperator.ENDS_WITH: "ILIKE", schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
# PATTERN is not used as a real SQL operator; this entry only keeps a valid value so conditions can still be built for it
schemas.SearchEventOperator.PATTERN: "regex",
# Selector operators: # Selector operators:
schemas.ClickEventExtraOperator.IS: "=", schemas.ClickEventExtraOperator.IS: "=",
schemas.ClickEventExtraOperator.IS_NOT: "!=", schemas.ClickEventExtraOperator.IS_NOT: "!=",
@ -72,4 +75,3 @@ def single_value(values):
if isinstance(v, Enum): if isinstance(v, Enum):
values[i] = v.value values[i] = v.value
return values return values


@ -74,4 +74,5 @@ EXP_CH_DRIVER=true
EXP_AUTOCOMPLETE=true EXP_AUTOCOMPLETE=true
EXP_ALERTS=true EXP_ALERTS=true
EXP_ERRORS_SEARCH=true EXP_ERRORS_SEARCH=true
EXP_METRICS=true EXP_METRICS=true
EXP_SESSIONS_SEARCH=true


@ -1,591 +0,0 @@
-- -- Original Q3
-- WITH ranked_events AS (SELECT *
-- FROM ranked_events_1736344377403),
-- n1 AS (SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- COUNT(1) AS sessions_count
-- FROM ranked_events
-- WHERE event_number_in_session = 1
-- AND isNotNull(next_value)
-- GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n2 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n1
-- INNER JOIN ranked_events AS re
-- ON (n1.next_value = re.e_value AND n1.next_type = re.event_type)
-- WHERE re.event_number_in_session = 2
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n3 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n2
-- INNER JOIN ranked_events AS re
-- ON (n2.next_value = re.e_value AND n2.next_type = re.event_type)
-- WHERE re.event_number_in_session = 3
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n4 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n3
-- INNER JOIN ranked_events AS re
-- ON (n3.next_value = re.e_value AND n3.next_type = re.event_type)
-- WHERE re.event_number_in_session = 4
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n5 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n4
-- INNER JOIN ranked_events AS re
-- ON (n4.next_value = re.e_value AND n4.next_type = re.event_type)
-- WHERE re.event_number_in_session = 5
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8)
-- SELECT *
-- FROM (SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n1
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n2
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n3
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n4
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n5) AS chart_steps
-- ORDER BY event_number_in_session;
-- Q1
-- CREATE TEMPORARY TABLE pre_ranked_events_1736344377403 AS
CREATE TABLE pre_ranked_events_1736344377403 ENGINE = Memory AS
(WITH initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
FROM experimental.events AS events
WHERE ((event_type = 'LOCATION' AND (url_path = '/en/deployment/')))
AND events.project_id = toUInt16(65)
AND events.datetime >= toDateTime(1735599600000 / 1000)
AND events.datetime < toDateTime(1736290799999 / 1000)
GROUP BY 1),
pre_ranked_events AS (SELECT *
FROM (SELECT session_id,
event_type,
datetime,
url_path AS e_value,
row_number() OVER (PARTITION BY session_id
ORDER BY datetime ,
message_id ) AS event_number_in_session
FROM experimental.events AS events
INNER JOIN initial_event ON (events.session_id = initial_event.session_id)
WHERE events.project_id = toUInt16(65)
AND events.datetime >= toDateTime(1735599600000 / 1000)
AND events.datetime < toDateTime(1736290799999 / 1000)
AND (events.event_type = 'LOCATION')
AND events.datetime >= initial_event.start_event_timestamp
) AS full_ranked_events
WHERE event_number_in_session <= 5)
SELECT *
FROM pre_ranked_events);
;
SELECT *
FROM pre_ranked_events_1736344377403
WHERE event_number_in_session < 3;
-- ---------Q2-----------
-- CREATE TEMPORARY TABLE ranked_events_1736344377403 AS
DROP TABLE ranked_events_1736344377403;
CREATE TABLE ranked_events_1736344377403 ENGINE = Memory AS
(WITH pre_ranked_events AS (SELECT *
FROM pre_ranked_events_1736344377403),
start_points AS (SELECT DISTINCT session_id
FROM pre_ranked_events
WHERE ((event_type = 'LOCATION' AND (e_value = '/en/deployment/')))
AND pre_ranked_events.event_number_in_session = 1),
ranked_events AS (SELECT pre_ranked_events.*,
leadInFrame(e_value)
OVER (PARTITION BY session_id ORDER BY datetime
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
leadInFrame(toNullable(event_type))
OVER (PARTITION BY session_id ORDER BY datetime
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
FROM start_points
INNER JOIN pre_ranked_events USING (session_id))
SELECT *
FROM ranked_events);
-- ranked events
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
-- AND e_value='/en/deployment/deploy-docker/'
-- AND next_value NOT IN ('/en/deployment/','/en/plugins/','/en/using-or/')
-- AND e_value NOT IN ('/en/deployment/deploy-docker/','/en/getting-started/','/en/deployment/deploy-ubuntu/')
AND isNotNull(next_value)
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY event_number_in_session, sessions_count DESC;
SELECT event_number_in_session,
event_type,
e_value,
COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value
ORDER BY event_number_in_session, sessions_count DESC;
SELECT COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
AND isNull(next_value)
;
-- ---------Q3 MORE -----------
WITH ranked_events AS (SELECT *
FROM ranked_events_1736344377403),
n1 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n2 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 2
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n3 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 3
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
drop_n AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n1
WHERE isNull(n1.next_type)
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n2
WHERE isNull(n2.next_type)),
-- TODO: make this as top_steps, where every step will go to next as top/others
top_n1 AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n1
WHERE isNotNull(next_type)
ORDER BY sessions_count DESC
LIMIT 3),
top_n2 AS (-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n2
WHERE (event_type, e_value) IN (SELECT event_type,
e_value
FROM n2
WHERE isNotNull(next_type)
GROUP BY event_type, e_value
ORDER BY SUM(sessions_count) DESC
LIMIT 3)
ORDER BY sessions_count DESC),
top_n AS (SELECT *
FROM top_n1
UNION ALL
SELECT *
FROM top_n2),
u_top_n AS (SELECT DISTINCT event_number_in_session,
event_type,
e_value
FROM top_n),
others_n AS (
-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n1
WHERE isNotNull(next_type)
ORDER BY sessions_count DESC
LIMIT 1000000 OFFSET 3
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n2
WHERE isNotNull(next_type)
-- GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 1000000 OFFSET 3)
SELECT *
FROM (
-- Top
SELECT *
FROM top_n
-- UNION ALL
-- -- Others
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- 'OTHER' AS next_type,
-- NULL AS next_value,
-- SUM(sessions_count)
-- FROM others_n
-- GROUP BY event_number_in_session, event_type, e_value
-- UNION ALL
-- -- Top go to Drop
-- SELECT drop_n.event_number_in_session,
-- drop_n.event_type,
-- drop_n.e_value,
-- drop_n.next_type,
-- drop_n.next_value,
-- drop_n.sessions_count
-- FROM drop_n
-- INNER JOIN u_top_n ON (drop_n.event_number_in_session = u_top_n.event_number_in_session
-- AND drop_n.event_type = u_top_n.event_type
-- AND drop_n.e_value = u_top_n.e_value)
-- ORDER BY drop_n.event_number_in_session
-- -- -- UNION ALL
-- -- -- Top go to Others
-- SELECT top_n.event_number_in_session,
-- top_n.event_type,
-- top_n.e_value,
-- 'OTHER' AS next_type,
-- NULL AS next_value,
-- SUM(top_n.sessions_count) AS sessions_count
-- FROM top_n
-- LEFT JOIN others_n ON (others_n.event_number_in_session = (top_n.event_number_in_session + 1)
-- AND top_n.next_type = others_n.event_type
-- AND top_n.next_value = others_n.e_value)
-- WHERE others_n.event_number_in_session IS NULL
-- AND top_n.next_type IS NOT NULL
-- GROUP BY event_number_in_session, event_type, e_value
-- UNION ALL
-- -- Others go to Top
-- SELECT others_n.event_number_in_session,
-- 'OTHER' AS event_type,
-- NULL AS e_value,
-- others_n.s_next_type AS next_type,
-- others_n.s_next_value AS next_value,
-- SUM(sessions_count) AS sessions_count
-- FROM others_n
-- INNER JOIN top_n ON (others_n.event_number_in_session = top_n.event_number_in_session + 1 AND
-- others_n.s_next_type = top_n.event_type AND
-- others_n.s_next_value = top_n.event_type)
-- GROUP BY others_n.event_number_in_session, next_type, next_value
-- UNION ALL
-- -- TODO: find if this works or not
-- -- Others go to Others
-- SELECT others_n.event_number_in_session,
-- 'OTHER' AS event_type,
-- NULL AS e_value,
-- 'OTHERS' AS next_type,
-- NULL AS next_value,
-- SUM(sessions_count) AS sessions_count
-- FROM others_n
-- LEFT JOIN u_top_n ON ((others_n.event_number_in_session + 1) = u_top_n.event_number_in_session
-- AND others_n.s_next_type = u_top_n.event_type
-- AND others_n.s_next_value = u_top_n.e_value)
-- WHERE u_top_n.event_number_in_session IS NULL
-- GROUP BY others_n.event_number_in_session
)
ORDER BY event_number_in_session;
-- ---------Q3 TOP ON VALUE ONLY -----------
WITH ranked_events AS (SELECT *
FROM ranked_events_1736344377403),
n1 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n2 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 2
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n3 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 3
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
drop_n AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n1
WHERE isNull(n1.next_type)
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n2
WHERE isNull(n2.next_type)),
top_n AS (SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n1
GROUP BY event_number_in_session, event_type, e_value
LIMIT 1
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n2
GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 3
UNION ALL
-- STEP 3
SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n3
GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 3),
top_n_with_next AS (SELECT n1.*
FROM n1
UNION ALL
SELECT n2.*
FROM n2
INNER JOIN top_n ON (n2.event_number_in_session = top_n.event_number_in_session
AND n2.event_type = top_n.event_type
AND n2.e_value = top_n.e_value)),
others_n AS (
-- STEP 2
SELECT n2.*
FROM n2
WHERE (n2.event_number_in_session, n2.event_type, n2.e_value) NOT IN
(SELECT event_number_in_session, event_type, e_value
FROM top_n
WHERE top_n.event_number_in_session = 2)
UNION ALL
-- STEP 3
SELECT n3.*
FROM n3
WHERE (n3.event_number_in_session, n3.event_type, n3.e_value) NOT IN
(SELECT event_number_in_session, event_type, e_value
FROM top_n
WHERE top_n.event_number_in_session = 3))
SELECT *
FROM (
-- SELECT sum(top_n_with_next.sessions_count)
-- FROM top_n_with_next
-- WHERE event_number_in_session = 1
-- -- AND isNotNull(next_value)
-- AND (next_type, next_value) IN
-- (SELECT others_n.event_type, others_n.e_value FROM others_n WHERE others_n.event_number_in_session = 2)
-- -- SELECT * FROM others_n
-- -- SELECT * FROM n2
-- SELECT *
-- FROM top_n
-- );
-- Top to Top: valid
SELECT top_n_with_next.*
FROM top_n_with_next
INNER JOIN top_n
ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
AND top_n_with_next.next_type = top_n.event_type
AND top_n_with_next.next_value = top_n.e_value)
UNION ALL
-- Top to Others: valid
SELECT top_n_with_next.event_number_in_session,
top_n_with_next.event_type,
top_n_with_next.e_value,
'OTHER' AS next_type,
NULL AS next_value,
SUM(top_n_with_next.sessions_count) AS sessions_count
FROM top_n_with_next
WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
(SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value
UNION ALL
-- Top go to Drop: valid
SELECT drop_n.event_number_in_session,
drop_n.event_type,
drop_n.e_value,
drop_n.next_type,
drop_n.next_value,
drop_n.sessions_count
FROM drop_n
INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
AND drop_n.event_type = top_n.event_type
AND drop_n.e_value = top_n.e_value)
ORDER BY drop_n.event_number_in_session
UNION ALL
-- Others go to Drop: valid
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
'DROP' AS next_type,
NULL AS next_value,
SUM(others_n.sessions_count) AS sessions_count
FROM others_n
WHERE isNull(others_n.next_type)
AND others_n.event_number_in_session < 3
GROUP BY others_n.event_number_in_session, next_type, next_value
UNION ALL
-- Others go to Top: valid
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
others_n.next_type,
others_n.next_value,
SUM(others_n.sessions_count) AS sessions_count
FROM others_n
WHERE isNotNull(others_n.next_type)
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
(SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
UNION ALL
-- Others go to Others
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
'OTHERS' AS next_type,
NULL AS next_value,
SUM(sessions_count) AS sessions_count
FROM others_n
WHERE isNotNull(others_n.next_type)
AND others_n.event_number_in_session < 3
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
(SELECT event_number_in_session, event_type, e_value FROM top_n)
GROUP BY others_n.event_number_in_session)
ORDER BY event_number_in_session, sessions_count DESC;
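The `ranked_events_1736344377403` staging table queried above is not defined in this file; a minimal sketch of how such a table could be produced in ClickHouse is shown below, assuming a per-session source table `events_source(session_id, event_type, e_value, created_at)` — every identifier not already used in the queries above is illustrative.
-- Sketch only (assumed): number each session's events and attach the following
-- event, so that next_type / next_value are NULL on a session's last event.
CREATE TABLE ranked_events_1736344377403
ENGINE = MergeTree ORDER BY session_id AS
SELECT session_id,
       event_type,
       e_value,
       row_number() OVER w                        AS event_number_in_session,
       leadInFrame(toNullable(event_type)) OVER w AS next_type,
       leadInFrame(toNullable(e_value)) OVER w    AS next_value
FROM events_source
WINDOW w AS (PARTITION BY session_id ORDER BY created_at
             ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING);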

View file

@ -1,16 +1,15 @@
urllib3==2.3.0 urllib3==2.3.0
requests==2.32.3 requests==2.32.3
boto3==1.36.12 boto3==1.37.21
pyjwt==2.10.1 pyjwt==2.10.1
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.4 psycopg[pool,binary]==3.2.6
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.15 clickhouse-connect==0.8.15
elasticsearch==8.17.1 elasticsearch==8.17.2
jira==3.8.0 jira==3.8.0
cachetools==5.5.1 cachetools==5.5.2
fastapi==0.115.8 fastapi==0.115.12
uvicorn[standard]==0.34.0 uvicorn[standard]==0.34.0
python-decouple==3.8 python-decouple==3.8
pydantic[email]==2.10.6 pydantic[email]==2.10.6

View file

@ -1,16 +1,15 @@
urllib3==2.3.0 urllib3==2.3.0
requests==2.32.3 requests==2.32.3
boto3==1.36.12 boto3==1.37.21
pyjwt==2.10.1 pyjwt==2.10.1
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.4 psycopg[pool,binary]==3.2.6
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.15 clickhouse-connect==0.8.15
elasticsearch==8.17.1 elasticsearch==8.17.2
jira==3.8.0 jira==3.8.0
cachetools==5.5.1 cachetools==5.5.2
fastapi==0.115.8 fastapi==0.115.12
uvicorn[standard]==0.34.0 uvicorn[standard]==0.34.0
python-decouple==3.8 python-decouple==3.8
pydantic[email]==2.10.6 pydantic[email]==2.10.6

View file

@ -0,0 +1,55 @@
from typing import Annotated
from fastapi import Body, Depends, Query
import schemas
from chalicelib.core import metadata
from chalicelib.core.product_analytics import events, properties
from or_dependencies import OR_context
from routers.base import get_routers
public_app, app, app_apikey = get_routers()
@app.get('/{projectId}/filters', tags=["product_analytics"])
def get_all_filters(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
context: schemas.CurrentContext = Depends(OR_context)):
return {
"data": {
"events": events.get_events(project_id=projectId, page=filter_query),
"filters": properties.get_all_properties(project_id=projectId, page=filter_query),
"metadata": metadata.get_for_filters(project_id=projectId)
}
}
@app.get('/{projectId}/events/names', tags=["product_analytics"])
def get_all_events(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": events.get_events(project_id=projectId, page=filter_query)}
@app.get('/{projectId}/properties/search', tags=["product_analytics"])
def get_event_properties(projectId: int, event_name: str = None,
context: schemas.CurrentContext = Depends(OR_context)):
if not event_name or len(event_name) == 0:
return {"data": []}
return {"data": properties.get_event_properties(project_id=projectId, event_name=event_name)}
@app.post('/{projectId}/events/search', tags=["product_analytics"])
def search_events(projectId: int, data: schemas.EventsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": events.search_events(project_id=projectId, data=data)}
@app.get('/{projectId}/lexicon/events', tags=["product_analytics", "lexicon"])
def get_all_lexicon_events(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": events.get_lexicon(project_id=projectId, page=filter_query)}
@app.get('/{projectId}/lexicon/properties', tags=["product_analytics", "lexicon"])
def get_all_lexicon_properties(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": properties.get_lexicon(project_id=projectId, page=filter_query)}

View file

@ -1,15 +0,0 @@
import schemas
from chalicelib.core.metrics import product_anaytics2
from fastapi import Depends
from or_dependencies import OR_context
from routers.base import get_routers
public_app, app, app_apikey = get_routers()
@app.post('/{projectId}/events/search', tags=["dashboard"])
def search_events(projectId: int,
# data: schemas.CreateDashboardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return product_anaytics2.search_events(project_id=projectId, data={})

View file

@ -1,10 +1,12 @@
from fastapi import Body, Depends from typing import Annotated
from fastapi import Body, Depends, Query
import schemas
from chalicelib.core.usability_testing import service from chalicelib.core.usability_testing import service
from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestUpdate, UTTestSearch from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestUpdate, UTTestSearch
from or_dependencies import OR_context from or_dependencies import OR_context
from routers.base import get_routers from routers.base import get_routers
from schemas import schemas
public_app, app, app_apikey = get_routers() public_app, app, app_apikey = get_routers()
tags = ["usability-tests"] tags = ["usability-tests"]
@ -77,9 +79,8 @@ async def update_ut_test(projectId: int, test_id: int, test_update: UTTestUpdate
@app.get('/{projectId}/usability-tests/{test_id}/sessions', tags=tags) @app.get('/{projectId}/usability-tests/{test_id}/sessions', tags=tags)
async def get_sessions(projectId: int, test_id: int, page: int = 1, limit: int = 10, async def get_sessions(projectId: int, test_id: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
live: bool = False, live: bool = False, user_id: str = None):
user_id: str = None):
""" """
Get sessions related to a specific UT test. Get sessions related to a specific UT test.
@ -88,20 +89,21 @@ async def get_sessions(projectId: int, test_id: int, page: int = 1, limit: int =
""" """
if live: if live:
return service.ut_tests_sessions_live(projectId, test_id, page, limit) return service.ut_tests_sessions_live(projectId, test_id, filter_query.page, filter_query.limit)
else: else:
return service.ut_tests_sessions(projectId, test_id, page, limit, user_id, live) return service.ut_tests_sessions(projectId, test_id, filter_query.page, filter_query.limit, user_id, live)
@app.get('/{projectId}/usability-tests/{test_id}/responses/{task_id}', tags=tags) @app.get('/{projectId}/usability-tests/{test_id}/responses/{task_id}', tags=tags)
async def get_responses(projectId: int, test_id: int, task_id: int, page: int = 1, limit: int = 10, query: str = None): async def get_responses(projectId: int, test_id: int, task_id: int,
filter_query: Annotated[schemas.PaginatedSchema, Query()], query: str = None):
""" """
Get responses related to a specific UT test. Get responses related to a specific UT test.
- **project_id**: The unique identifier of the project. - **project_id**: The unique identifier of the project.
- **test_id**: The unique identifier of the UT test. - **test_id**: The unique identifier of the UT test.
""" """
return service.get_responses(test_id, task_id, page, limit, query) return service.get_responses(test_id, task_id, filter_query.page, filter_query.limit, query)
@app.get('/{projectId}/usability-tests/{test_id}/statistics', tags=tags) @app.get('/{projectId}/usability-tests/{test_id}/statistics', tags=tags)

View file

@ -1,2 +1,4 @@
from .schemas import * from .schemas import *
from .product_analytics import *
from . import overrides as _overrides from . import overrides as _overrides
from .schemas import _PaginatedSchema as PaginatedSchema

View file

@ -0,0 +1,22 @@
from typing import Optional, List, Literal, Union, Annotated
from pydantic import Field
from .overrides import BaseModel
from .schemas import EventPropertiesSchema, SortOrderType, _TimedSchema, \
_PaginatedSchema, PropertyFilterSchema
class EventSearchSchema(BaseModel):
is_event: Literal[True] = True
name: str = Field(...)
properties: Optional[EventPropertiesSchema] = Field(default=None)
ProductAnalyticsGroupedFilter = Annotated[Union[EventSearchSchema, PropertyFilterSchema], \
Field(discriminator='is_event')]
class EventsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
filters: List[ProductAnalyticsGroupedFilter] = Field(...)
sort: str = Field(default="startTs")
order: SortOrderType = Field(default=SortOrderType.DESC)

View file

@ -404,6 +404,7 @@ class EventType(str, Enum):
REQUEST_MOBILE = "requestMobile" REQUEST_MOBILE = "requestMobile"
ERROR_MOBILE = "errorMobile" ERROR_MOBILE = "errorMobile"
SWIPE_MOBILE = "swipeMobile" SWIPE_MOBILE = "swipeMobile"
EVENT = "event"
class PerformanceEventType(str, Enum): class PerformanceEventType(str, Enum):
@ -464,6 +465,7 @@ class SearchEventOperator(str, Enum):
NOT_CONTAINS = "notContains" NOT_CONTAINS = "notContains"
STARTS_WITH = "startsWith" STARTS_WITH = "startsWith"
ENDS_WITH = "endsWith" ENDS_WITH = "endsWith"
PATTERN = "regex"
class ClickEventExtraOperator(str, Enum): class ClickEventExtraOperator(str, Enum):
@ -545,7 +547,66 @@ class RequestGraphqlFilterSchema(BaseModel):
return values return values
class SessionSearchEventSchema2(BaseModel): class EventPredefinedPropertyType(str, Enum):
TIME = "$time"
SOURCE = "$source"
DURATION_S = "$duration_s"
DESCRIPTION = "description"
AUTO_CAPTURED = "$auto_captured"
SDK_EDITION = "$sdk_edition"
SDK_VERSION = "$sdk_version"
DEVICE_ID = "$device_id"
OS = "$os"
OS_VERSION = "$os_version"
BROWSER = "$browser"
BROWSER_VERSION = "$browser_version"
DEVICE = "$device"
SCREEN_HEIGHT = "$screen_height"
SCREEN_WIDTH = "$screen_width"
CURRENT_URL = "$current_url"
INITIAL_REFERRER = "$initial_referrer"
REFERRING_DOMAIN = "$referring_domain"
REFERRER = "$referrer"
INITIAL_REFERRING_DOMAIN = "$initial_referring_domain"
SEARCH_ENGINE = "$search_engine"
SEARCH_ENGINE_KEYWORD = "$search_engine_keyword"
UTM_SOURCE = "utm_source"
UTM_MEDIUM = "utm_medium"
UTM_CAMPAIGN = "utm_campaign"
COUNTRY = "$country"
STATE = "$state"
CITY = "$city"
ISSUE_TYPE = "issue_type"
TAGS = "$tags"
IMPORT = "$import"
class PropertyFilterSchema(BaseModel):
is_event: Literal[False] = False
name: Union[EventPredefinedPropertyType, str] = Field(...)
operator: Union[SearchEventOperator, MathOperator] = Field(...)
value: List[Union[int, str]] = Field(...)
# property_type: Optional[Literal["string", "number", "date"]] = Field(default=None)
@computed_field
@property
def is_predefined(self) -> bool:
return EventPredefinedPropertyType.has_value(self.name)
@model_validator(mode="after")
def transform_name(self):
if isinstance(self.name, Enum):
self.name = self.name.value
return self
class EventPropertiesSchema(BaseModel):
operator: Literal["and", "or"] = Field(...)
filters: List[PropertyFilterSchema] = Field(...)
class SessionSearchEventSchema(BaseModel):
is_event: Literal[True] = True is_event: Literal[True] = True
value: List[Union[str, int]] = Field(...) value: List[Union[str, int]] = Field(...)
type: Union[EventType, PerformanceEventType] = Field(...) type: Union[EventType, PerformanceEventType] = Field(...)
@ -553,6 +614,7 @@ class SessionSearchEventSchema2(BaseModel):
source: Optional[List[Union[ErrorSource, int, str]]] = Field(default=None) source: Optional[List[Union[ErrorSource, int, str]]] = Field(default=None)
sourceOperator: Optional[MathOperator] = Field(default=None) sourceOperator: Optional[MathOperator] = Field(default=None)
filters: Optional[List[RequestGraphqlFilterSchema]] = Field(default_factory=list) filters: Optional[List[RequestGraphqlFilterSchema]] = Field(default_factory=list)
properties: Optional[EventPropertiesSchema] = Field(default=None)
_remove_duplicate_values = field_validator('value', mode='before')(remove_duplicate_values) _remove_duplicate_values = field_validator('value', mode='before')(remove_duplicate_values)
_single_to_list_values = field_validator('value', mode='before')(single_to_list) _single_to_list_values = field_validator('value', mode='before')(single_to_list)
@ -660,12 +722,12 @@ def add_missing_is_event(values: dict):
# this type is created to allow mixing events&filters and specifying a discriminator # this type is created to allow mixing events&filters and specifying a discriminator
GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema2], GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema],
Field(discriminator='is_event'), BeforeValidator(add_missing_is_event)] Field(discriminator='is_event'), BeforeValidator(add_missing_is_event)]
class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema): class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
events: List[SessionSearchEventSchema2] = Field(default_factory=list, doc_hidden=True) events: List[SessionSearchEventSchema] = Field(default_factory=list, doc_hidden=True)
filters: List[GroupedFilterType] = Field(default_factory=list) filters: List[GroupedFilterType] = Field(default_factory=list)
sort: str = Field(default="startTs") sort: str = Field(default="startTs")
order: SortOrderType = Field(default=SortOrderType.DESC) order: SortOrderType = Field(default=SortOrderType.DESC)
@ -690,6 +752,8 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
def add_missing_attributes(cls, values): def add_missing_attributes(cls, values):
# in case isEvent is wrong: # in case isEvent is wrong:
for f in values.get("filters") or []: for f in values.get("filters") or []:
if f.get("type") is None:
continue
if EventType.has_value(f["type"]) and not f.get("isEvent"): if EventType.has_value(f["type"]) and not f.get("isEvent"):
f["isEvent"] = True f["isEvent"] = True
elif FilterType.has_value(f["type"]) and f.get("isEvent"): elif FilterType.has_value(f["type"]) and f.get("isEvent"):
@ -715,6 +779,15 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
f["value"] = vals f["value"] = vals
return values return values
@model_validator(mode="after")
def check_pa_event_filter(self):
for v in self.filters + self.events:
if v.type == EventType.EVENT:
assert v.operator in (SearchEventOperator.IS, MathOperator.EQUAL), \
"operator must be {SearchEventOperator.IS} or {MathOperator.EQUAL} for EVENT type"
assert len(v.value) == 1, "value must have 1 single value for EVENT type"
return self
@model_validator(mode="after") @model_validator(mode="after")
def split_filters_events(self): def split_filters_events(self):
n_filters = [] n_filters = []
@ -1135,7 +1208,7 @@ class CardPathAnalysis(__CardSchema):
view_type: MetricOtherViewType = Field(...) view_type: MetricOtherViewType = Field(...)
metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list) metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list)
density: int = Field(default=4, ge=2, le=10) density: int = Field(default=4, ge=2, le=10)
rows: int = Field(default=3, ge=1, le=10) rows: int = Field(default=5, ge=1, le=10)
start_type: Literal["start", "end"] = Field(default="start") start_type: Literal["start", "end"] = Field(default="start")
start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list) start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list)
@ -1404,7 +1477,7 @@ class MetricSearchSchema(_PaginatedSchema):
mine_only: bool = Field(default=False) mine_only: bool = Field(default=False)
class _HeatMapSearchEventRaw(SessionSearchEventSchema2): class _HeatMapSearchEventRaw(SessionSearchEventSchema):
type: Literal[EventType.LOCATION] = Field(...) type: Literal[EventType.LOCATION] = Field(...)
@ -1529,3 +1602,30 @@ class TagCreate(TagUpdate):
class ScopeSchema(BaseModel): class ScopeSchema(BaseModel):
scope: int = Field(default=1, ge=1, le=2) scope: int = Field(default=1, ge=1, le=2)
class SessionModel(BaseModel):
duration: int
errorsCount: int
eventsCount: int
favorite: bool = Field(default=False)
issueScore: int
issueTypes: List[IssueType] = Field(default=[])
metadata: dict = Field(default={})
pagesCount: int
platform: str
projectId: int
sessionId: str
startTs: int
timezone: Optional[str]
userAnonymousId: Optional[str]
userBrowser: str
userCity: str
userCountry: str
userDevice: Optional[str]
userDeviceType: str
userId: Optional[str]
userOs: str
userState: str
userUuid: str
viewed: bool = Field(default=False)

24
assist-server/Makefile Normal file
View file

@ -0,0 +1,24 @@
ee ?= "false" # true to build ee
arch ?= "amd64" # default amd64
docker_runtime ?= "docker" # default docker runtime
docker_repo ?= "public.ecr.aws/p1t3u8a3"
docker_build_args ?= $(if $(filter depot,$(docker_runtime)),"--push","")
.PHONY: help
help: ## Prints help for targets with comments
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Docker
.PHONY: build
build: ## Build the backend. ee=true for ee build.
@DOCKER_BUILD_ARGS=$(docker_build_args) DOCKER_REPO=$(docker_repo) ARCH=$(arch) DOCKER_RUNTIME=$(docker_runtime) bash build.sh $(ee)
##@ Local Dev
.PHONY: scan
scan: ## Scan the backend
@echo scanning foss
@trivy fs -q .
@echo scanning ee
@trivy fs -q ../ee/assist-server/
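A typical invocation of these targets might look like the following (values are examples only; `docker_runtime=depot` matters because it switches `docker_build_args` to `--push`):
# example only: ee build for arm64 pushed through the depot runtime
make build ee=true arch=arm64 docker_runtime=depot
# scan both the foss and ee sources with trivy
make scan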

61
assist-server/build.sh Normal file
View file

@ -0,0 +1,61 @@
#!/bin/bash
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
ARCH=${ARCH:-amd64}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
}
source ../scripts/lib/_docker.sh
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
image_tag="${image_tag}-ee"
}
update_helm_release() {
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api() {
destination="_assist-server_ee"
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../assist-server ../${destination}
cd ../${destination} || exit 1
cp -rf ../ee/assist-server/* ./
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist-server:${image_tag} .
cd ../assist-server || exit 1
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/assist-server:${image_tag} ${DOCKER_REPO:-'local'}/assist-server:latest
docker push ${DOCKER_REPO:-'local'}/assist-server:latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
}
echo "build completed for assist-server"
}
check_prereq
build_api $1
if [[ $PATCH -eq 1 ]]; then
update_helm_release assist-server
fi
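For reference, a hypothetical invocation combining the environment switches the script reads (IMAGE_TAG, DOCKER_REPO, PUSH_IMAGE, SIGN_IMAGE/SIGN_KEY, PATCH):
# example only: build, push and sign the assist-server image
IMAGE_TAG=v1.0.0 DOCKER_REPO=myDockerHubID PUSH_IMAGE=1 \
SIGN_IMAGE=1 SIGN_KEY=cosign.key bash build.sh ee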

View file

@ -19,14 +19,16 @@ const EVENTS_DEFINITION = {
} }
}; };
EVENTS_DEFINITION.emit = { EVENTS_DEFINITION.emit = {
NEW_AGENT: "NEW_AGENT", NEW_AGENT: "NEW_AGENT",
NO_AGENTS: "NO_AGENT", NO_AGENTS: "NO_AGENT",
AGENT_DISCONNECT: "AGENT_DISCONNECTED", AGENT_DISCONNECT: "AGENT_DISCONNECTED",
AGENTS_CONNECTED: "AGENTS_CONNECTED", AGENTS_CONNECTED: "AGENTS_CONNECTED",
NO_SESSIONS: "SESSION_DISCONNECTED", AGENTS_INFO_CONNECTED: "AGENTS_INFO_CONNECTED",
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED", NO_SESSIONS: "SESSION_DISCONNECTED",
SESSION_RECONNECTED: "SESSION_RECONNECTED", SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT SESSION_RECONNECTED: "SESSION_RECONNECTED",
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT,
WEBRTC_CONFIG: "WEBRTC_CONFIG",
}; };
const BASE_sessionInfo = { const BASE_sessionInfo = {

View file

@ -27,9 +27,14 @@ const respond = function (req, res, data) {
res.setHeader('Content-Type', 'application/json'); res.setHeader('Content-Type', 'application/json');
res.end(JSON.stringify(result)); res.end(JSON.stringify(result));
} else { } else {
res.cork(() => { if (!res.aborted) {
res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result)); res.cork(() => {
}); res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
});
} else {
logger.debug("response aborted");
return;
}
} }
const duration = performance.now() - req.startTs; const duration = performance.now() - req.startTs;
IncreaseTotalRequests(); IncreaseTotalRequests();

View file

@ -42,7 +42,7 @@ const findSessionSocketId = async (io, roomId, tabId) => {
}; };
async function getRoomData(io, roomID) { async function getRoomData(io, roomID) {
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = []; let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [], config = null, agentInfos = [];
const connected_sockets = await io.in(roomID).fetchSockets(); const connected_sockets = await io.in(roomID).fetchSockets();
if (connected_sockets.length > 0) { if (connected_sockets.length > 0) {
for (let socket of connected_sockets) { for (let socket of connected_sockets) {
@ -52,13 +52,19 @@ async function getRoomData(io, roomID) {
} else { } else {
agentsCount++; agentsCount++;
agentIDs.push(socket.id); agentIDs.push(socket.id);
agentInfos.push({ ...socket.handshake.query.agentInfo, socketId: socket.id });
if (socket.handshake.query.config !== undefined) {
config = socket.handshake.query.config;
}
} }
} }
} else { } else {
tabsCount = -1; tabsCount = -1;
agentsCount = -1; agentsCount = -1;
agentInfos = [];
agentIDs = [];
} }
return {tabsCount, agentsCount, tabIDs, agentIDs}; return {tabsCount, agentsCount, tabIDs, agentIDs, config, agentInfos};
} }
function processNewSocket(socket) { function processNewSocket(socket) {
@ -78,7 +84,7 @@ async function onConnect(socket) {
IncreaseOnlineConnections(socket.handshake.query.identity); IncreaseOnlineConnections(socket.handshake.query.identity);
const io = getServer(); const io = getServer();
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId); const {tabsCount, agentsCount, tabIDs, agentInfos, agentIDs, config} = await getRoomData(io, socket.handshake.query.roomId);
if (socket.handshake.query.identity === IDENTITIES.session) { if (socket.handshake.query.identity === IDENTITIES.session) {
// Check if session with the same tabID already connected, if so, refuse new connexion // Check if session with the same tabID already connected, if so, refuse new connexion
@ -100,7 +106,9 @@ async function onConnect(socket) {
// Inform all connected agents about reconnected session // Inform all connected agents about reconnected session
if (agentsCount > 0) { if (agentsCount > 0) {
logger.debug(`notifying new session about agent-existence`); logger.debug(`notifying new session about agent-existence`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, config);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs); io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_INFO_CONNECTED, agentInfos);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id); socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
} }
} else if (tabsCount <= 0) { } else if (tabsCount <= 0) {
@ -118,7 +126,8 @@ async function onConnect(socket) {
// Stats // Stats
startAssist(socket, socket.handshake.query.agentID); startAssist(socket, socket.handshake.query.agentID);
} }
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo); io.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, socket.handshake.query.config);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, { ...socket.handshake.query.agentInfo });
} }
// Set disconnect handler // Set disconnect handler
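On the session side, the two new emits could be consumed roughly as follows; the event names come from EVENTS_DEFINITION above, while the handler bodies and helper names are purely illustrative:
// sketch only: socket.io-client handlers for the newly emitted events
socket.on("WEBRTC_CONFIG", (config) => {
    // ICE/TURN configuration forwarded from the agent's handshake query
    applyWebRTCConfig(config);                            // hypothetical helper
});
socket.on("AGENTS_INFO_CONNECTED", (agentInfos) => {
    // one entry per connected agent: { ...agentInfo, socketId }
    agentInfos.forEach((info) => showAgentBadge(info));   // hypothetical helper
});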

33
backend/Makefile Normal file
View file

@ -0,0 +1,33 @@
distro ?= foss # ee to build ee
app ?= "" # app name, default all
arch ?= "amd64" # default amd64
docker_repo ?= "public.ecr.aws/p1t3u8a3"
docker_runtime ?= "docker" # default docker runtime
image_tag ?= "" # image tag to build. Default is git sha short
docker_build_args ?= $(if $(filter depot,$(docker_runtime)),"--push","")
.PHONY: help
help: ## Prints help for targets with comments
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Docker
.PHONY: build
build: ## Build the backend. ee=true for ee build. app=app name for only one app. Default build all apps.
IMAGE_TAG=$(image_tag) DOCKER_BUILD_ARGS=$(docker_build_args) DOCKER_REPO=$(docker_repo) ARCH=$(arch) DOCKER_RUNTIME=$(docker_runtime) bash build.sh $(distro) $(app)
##@ Local Dev
.PHONY: scan
scan: ## Scan the backend
@trivy fs -q .
.PHONY: update
update: ## Update the backend dependencies
@echo Updating dependencies
@go get -u -v ./...
@go mod tidy
run: ## Run the backend. app=app name for app to run
@if [ $(app) == "" ]; then echo "Error: app parameter is required. Usage: make run app=<app_name>"; exit 1; fi
@go run "cmd/$(app)/main.go"
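Example invocations (the app name is a placeholder; `distro=ee` selects the enterprise build and `image_tag` overrides the default git-sha tag):
# examples only
make build distro=ee app=<app_name> image_tag=v1.22.0
make run app=<app_name>
make update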

View file

@ -2,44 +2,71 @@ package main
import ( import (
"context" "context"
"os"
"os/signal"
"syscall"
analyticsConfig "openreplay/backend/internal/config/analytics"
"openreplay/backend/pkg/analytics"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api"
) )
func main() { func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := analyticsConfig.New(log) log.Info(ctx, "Cacher service started")
// Observability
webMetrics := web.New("analytics")
dbMetrics := database.New("analytics")
metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
pgConn, err := pool.New(dbMetrics, cfg.Postgres.String()) sigchan := make(chan os.Signal, 1)
if err != nil { signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
log.Fatal(ctx, "can't init postgres connection: %s", err)
for {
select {
case sig := <-sigchan:
log.Error(ctx, "Caught signal %v: terminating", sig)
os.Exit(0)
}
} }
defer pgConn.Close()
builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
if err != nil {
log.Fatal(ctx, "can't init services: %s", err)
}
router, err := api.NewRouter(&cfg.HTTP, log)
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(api.NoPrefix, builder.CardsAPI, builder.DashboardsAPI, builder.ChartsAPI)
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
server.Run(ctx, log, &cfg.HTTP, router)
} }
//
//import (
// "context"
//
// analyticsConfig "openreplay/backend/internal/config/analytics"
// "openreplay/backend/pkg/analytics"
// "openreplay/backend/pkg/db/postgres/pool"
// "openreplay/backend/pkg/logger"
// "openreplay/backend/pkg/metrics"
// "openreplay/backend/pkg/metrics/database"
// "openreplay/backend/pkg/metrics/web"
// "openreplay/backend/pkg/server"
// "openreplay/backend/pkg/server/api"
//)
//
//func main() {
// ctx := context.Background()
// log := logger.New()
// cfg := analyticsConfig.New(log)
// // Observability
// webMetrics := web.New("analytics")
// dbMetrics := database.New("analytics")
// metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
//
// pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
// if err != nil {
// log.Fatal(ctx, "can't init postgres connection: %s", err)
// }
// defer pgConn.Close()
//
// builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
// if err != nil {
// log.Fatal(ctx, "can't init services: %s", err)
// }
//
// router, err := api.NewRouter(&cfg.HTTP, log)
// if err != nil {
// log.Fatal(ctx, "failed while creating router: %s", err)
// }
// router.AddHandlers(api.NoPrefix, builder.CardsAPI, builder.DashboardsAPI, builder.ChartsAPI)
// router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
//
// server.Run(ctx, log, &cfg.HTTP, router)
//}

View file

@ -31,6 +31,7 @@ require (
github.com/oschwald/maxminddb-golang v1.13.1 github.com/oschwald/maxminddb-golang v1.13.1
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_golang v1.20.5
github.com/redis/go-redis/v9 v9.8.0
github.com/rs/xid v1.6.0 github.com/rs/xid v1.6.0
github.com/sethvargo/go-envconfig v1.1.0 github.com/sethvargo/go-envconfig v1.1.0
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
@ -45,6 +46,7 @@ require (
github.com/DataDog/zstd v1.5.6 // indirect github.com/DataDog/zstd v1.5.6 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/city v1.0.1 // indirect

View file

@ -19,12 +19,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM=
github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0=
github.com/ClickHouse/ch-go v0.65.0 h1:vZAXfTQliuNNefqkPDewX3kgRxN6Q4vUENnnY+ynTRY= github.com/ClickHouse/ch-go v0.65.0 h1:vZAXfTQliuNNefqkPDewX3kgRxN6Q4vUENnnY+ynTRY=
github.com/ClickHouse/ch-go v0.65.0/go.mod h1:tCM0XEH5oWngoi9Iu/8+tjPBo04I/FxNIffpdjtwx3k= github.com/ClickHouse/ch-go v0.65.0/go.mod h1:tCM0XEH5oWngoi9Iu/8+tjPBo04I/FxNIffpdjtwx3k=
github.com/ClickHouse/clickhouse-go/v2 v2.30.1 h1:Dy0n0l+cMbPXs8hFkeeWGaPKrB+MDByUNQBSmRO3W6k=
github.com/ClickHouse/clickhouse-go/v2 v2.30.1/go.mod h1:szk8BMoQV/NgHXZ20ZbwDyvPWmpfhRKjFkc6wzASGxM=
github.com/ClickHouse/clickhouse-go/v2 v2.32.1 h1:RLhkxA6iH/bLTXeDtEj/u4yUx9Q03Y95P+cjHScQK78= github.com/ClickHouse/clickhouse-go/v2 v2.32.1 h1:RLhkxA6iH/bLTXeDtEj/u4yUx9Q03Y95P+cjHScQK78=
github.com/ClickHouse/clickhouse-go/v2 v2.32.1/go.mod h1:YtaiIFlHCGNPbOpAvFGYobtcVnmgYvD/WmzitixxWYc= github.com/ClickHouse/clickhouse-go/v2 v2.32.1/go.mod h1:YtaiIFlHCGNPbOpAvFGYobtcVnmgYvD/WmzitixxWYc=
github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M= github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M=
@ -75,6 +71,10 @@ github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@ -122,6 +122,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/buildx v0.15.1 h1:1cO6JIc0rOoC8tlxfXoh1HH1uxaNvYH1q7J7kv5enhw= github.com/docker/buildx v0.15.1 h1:1cO6JIc0rOoC8tlxfXoh1HH1uxaNvYH1q7J7kv5enhw=
@ -132,8 +134,8 @@ github.com/docker/compose/v2 v2.28.1 h1:ORPfiVHrpnRQBDoC3F8JJyWAY8N5gWuo3Fgwyivx
github.com/docker/compose/v2 v2.28.1/go.mod h1:wDtGQFHe99sPLCHXeVbCkc+Wsl4Y/2ZxiAJa/nga6rA= github.com/docker/compose/v2 v2.28.1/go.mod h1:wDtGQFHe99sPLCHXeVbCkc+Wsl4Y/2ZxiAJa/nga6rA=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@ -449,6 +451,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o= github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o=
github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8= github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -571,8 +575,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkE
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
@ -613,8 +617,6 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
@ -639,8 +641,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
@ -652,8 +652,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -675,8 +675,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -685,8 +683,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -696,8 +694,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=

View file

@ -108,15 +108,15 @@ func (c *connectorImpl) newBatch(name, query string) error {
var batches = map[string]string{ var batches = map[string]string{
"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)", "sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)",
"autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))", "autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))",
"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)", "issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)",
"mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)", "mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)",
"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
@ -267,6 +267,13 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
nullableUint16(uint16(msg.InputDuration)), nullableUint16(uint16(msg.InputDuration)),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -307,6 +314,13 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
"mouse_thrashing", "mouse_thrashing",
issueID, issueID,
jsonString, jsonString,
@ -363,6 +377,13 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
msg.Type, msg.Type,
issueID, issueID,
jsonString, jsonString,
@ -452,6 +473,12 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.URL), cropString(msg.URL),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -512,6 +539,12 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url), cropString(msg.Url),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -551,6 +584,13 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
msgID, msgID,
jsonString, jsonString,
); err != nil { ); err != nil {
@ -601,6 +641,13 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("performance", err) c.checkError("performance", err)
@ -652,6 +699,13 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.URL),
nullableUint16(uint16(msg.Duration)), nullableUint16(uint16(msg.Duration)),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -683,6 +737,13 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("custom", err) c.checkError("custom", err)
@ -714,6 +775,13 @@ func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.G
true, true,
session.Platform, session.Platform,
session.UserOSVersion, session.UserOSVersion,
session.UserOS,
session.UserBrowser,
session.Referrer,
session.UserCountry,
session.UserState,
session.UserCity,
cropString(msg.Url),
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("graphql", err) c.checkError("graphql", err)

View file

@ -84,7 +84,10 @@ func (p *poolImpl) Begin() (*Tx, error) {
tx, err := p.conn.Begin(context.Background()) tx, err := p.conn.Begin(context.Background())
p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "") p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
p.metrics.IncreaseTotalRequests("begin", "") p.metrics.IncreaseTotalRequests("begin", "")
return &Tx{tx, p.metrics}, err return &Tx{
origTx: tx,
metrics: p.metrics,
}, err
} }
func (p *poolImpl) Close() { func (p *poolImpl) Close() {
@ -94,13 +97,13 @@ func (p *poolImpl) Close() {
// TX - start // TX - start
type Tx struct { type Tx struct {
pgx.Tx origTx pgx.Tx
metrics database.Database metrics database.Database
} }
func (tx *Tx) TxExec(sql string, args ...interface{}) error { func (tx *Tx) TxExec(sql string, args ...interface{}) error {
start := time.Now() start := time.Now()
_, err := tx.Exec(context.Background(), sql, args...) _, err := tx.origTx.Exec(context.Background(), sql, args...)
method, table := methodName(sql) method, table := methodName(sql)
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
tx.metrics.IncreaseTotalRequests(method, table) tx.metrics.IncreaseTotalRequests(method, table)
@ -109,7 +112,7 @@ func (tx *Tx) TxExec(sql string, args ...interface{}) error {
func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row { func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
start := time.Now() start := time.Now()
res := tx.QueryRow(context.Background(), sql, args...) res := tx.origTx.QueryRow(context.Background(), sql, args...)
method, table := methodName(sql) method, table := methodName(sql)
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table) tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
tx.metrics.IncreaseTotalRequests(method, table) tx.metrics.IncreaseTotalRequests(method, table)
@ -118,7 +121,7 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
func (tx *Tx) TxRollback() error { func (tx *Tx) TxRollback() error {
start := time.Now() start := time.Now()
err := tx.Rollback(context.Background()) err := tx.origTx.Rollback(context.Background())
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "") tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
tx.metrics.IncreaseTotalRequests("rollback", "") tx.metrics.IncreaseTotalRequests("rollback", "")
return err return err
@ -126,7 +129,7 @@ func (tx *Tx) TxRollback() error {
func (tx *Tx) TxCommit() error { func (tx *Tx) TxCommit() error {
start := time.Now() start := time.Now()
err := tx.Commit(context.Background()) err := tx.origTx.Commit(context.Background())
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "") tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
tx.metrics.IncreaseTotalRequests("commit", "") tx.metrics.IncreaseTotalRequests("commit", "")
return err return err
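
This change stops embedding pgx.Tx inside the Tx wrapper and keeps it in a named origTx field, so statements can only run through the instrumented TxExec/TxQueryRow/TxCommit/TxRollback methods. A simplified sketch of the same pattern, using stand-in interfaces instead of the real pgx and metrics types:

```go
// Simplified sketch: the underlying transaction lives in a named field, not an
// embedded one, so callers cannot bypass the timing/metrics wrapper.
package main

import (
	"fmt"
	"time"
)

// recorder stands in for the database.Database metrics interface.
type recorder interface {
	RecordRequestDuration(ms float64, method, table string)
}

type stdoutMetrics struct{}

func (stdoutMetrics) RecordRequestDuration(ms float64, method, table string) {
	fmt.Printf("%s %q took %.2fms\n", method, table, ms)
}

// rawTx stands in for the few pgx.Tx methods the wrapper forwards to.
type rawTx interface {
	Exec(sql string, args ...interface{}) error
}

type Tx struct {
	origTx  rawTx // named field: origTx.Exec is reachable only through TxExec
	metrics recorder
}

func (tx *Tx) TxExec(sql string, args ...interface{}) error {
	start := time.Now()
	err := tx.origTx.Exec(sql, args...)
	tx.metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), "exec", "")
	return err
}

type noopTx struct{}

func (noopTx) Exec(sql string, args ...interface{}) error { return nil }

func main() {
	tx := &Tx{origTx: noopTx{}, metrics: stdoutMetrics{}}
	_ = tx.TxExec("UPDATE spots.tasks SET status = 'done' WHERE spot_id = $1", 1)
}
```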

View file

@ -2,7 +2,9 @@ package redis
import ( import (
"errors" "errors"
"github.com/go-redis/redis"
"github.com/redis/go-redis/v9"
config "openreplay/backend/internal/config/redis" config "openreplay/backend/internal/config/redis"
) )

View file

@ -5,10 +5,11 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/google/uuid"
"hash/fnv" "hash/fnv"
"strconv" "strconv"
"github.com/google/uuid"
. "openreplay/backend/pkg/messages" . "openreplay/backend/pkg/messages"
) )
@ -23,41 +24,7 @@ type ErrorEvent struct {
Payload string Payload string
Tags map[string]*string Tags map[string]*string
OriginType int OriginType int
} Url string
func unquote(s string) string {
if s[0] == '"' {
return s[1 : len(s)-1]
}
return s
}
func parseTags(tagsJSON string) (tags map[string]*string, err error) {
if len(tagsJSON) == 0 {
return nil, fmt.Errorf("empty tags")
}
if tagsJSON[0] == '[' {
var tagsArr []json.RawMessage
if err = json.Unmarshal([]byte(tagsJSON), &tagsArr); err != nil {
return
}
tags = make(map[string]*string)
for _, keyBts := range tagsArr {
tags[unquote(string(keyBts))] = nil
}
} else if tagsJSON[0] == '{' {
var tagsObj map[string]json.RawMessage
if err = json.Unmarshal([]byte(tagsJSON), &tagsObj); err != nil {
return
}
tags = make(map[string]*string)
for key, valBts := range tagsObj {
val := unquote(string(valBts))
tags[key] = &val
}
}
return
} }
func WrapJSException(m *JSException) (*ErrorEvent, error) { func WrapJSException(m *JSException) (*ErrorEvent, error) {
@ -69,6 +36,7 @@ func WrapJSException(m *JSException) (*ErrorEvent, error) {
Message: m.Message, Message: m.Message,
Payload: m.Payload, Payload: m.Payload,
OriginType: m.TypeID(), OriginType: m.TypeID(),
Url: m.Url,
}, nil }, nil
} }
@ -81,6 +49,7 @@ func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {
Message: m.Message, Message: m.Message,
Payload: m.Payload, Payload: m.Payload,
OriginType: m.TypeID(), OriginType: m.TypeID(),
Url: m.Url,
} }
} }

View file

@ -26,7 +26,11 @@ func New() Logger {
encoderConfig := zap.NewProductionEncoderConfig() encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000") encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
jsonEncoder := zapcore.NewJSONEncoder(encoderConfig) jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), zap.InfoLevel) logLevel := zap.InfoLevel
if os.Getenv("DEBUG") == "true" {
logLevel = zap.DebugLevel
}
core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), logLevel)
baseLogger := zap.New(core, zap.AddCaller()) baseLogger := zap.New(core, zap.AddCaller())
logger := baseLogger.WithOptions(zap.AddCallerSkip(1)) logger := baseLogger.WithOptions(zap.AddCallerSkip(1))
customLogger := &loggerImpl{l: logger} customLogger := &loggerImpl{l: logger}
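
The logger now picks its level from a DEBUG environment variable instead of always using Info. A self-contained sketch of the same zap setup, assuming the DEBUG=true convention shown above:

```go
// Sketch: env-driven zap log level, mirroring the change above.
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func newLogger() *zap.Logger {
	encoderConfig := zap.NewProductionEncoderConfig()
	encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")

	logLevel := zap.InfoLevel
	if os.Getenv("DEBUG") == "true" {
		logLevel = zap.DebugLevel // debug lines are emitted only when DEBUG=true
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderConfig), zapcore.AddSync(os.Stdout), logLevel)
	return zap.New(core, zap.AddCaller())
}

func main() {
	log := newLogger()
	defer log.Sync()
	log.Debug("visible only with DEBUG=true")
	log.Info("always visible")
}
```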

View file

@ -1,6 +1,7 @@
package projects package projects
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"time" "time"
@ -40,10 +41,10 @@ func (c *cacheImpl) Set(project *Project) error {
if err != nil { if err != nil {
return err return err
} }
if _, err = c.db.Redis.Set(fmt.Sprintf("project:id:%d", project.ProjectID), projectBytes, time.Minute*10).Result(); err != nil { if _, err = c.db.Redis.Set(context.Background(), fmt.Sprintf("project:id:%d", project.ProjectID), projectBytes, time.Minute*10).Result(); err != nil {
return err return err
} }
if _, err = c.db.Redis.Set(fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil { if _, err = c.db.Redis.Set(context.Background(), fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
return err return err
} }
c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project") c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
@ -56,7 +57,7 @@ func (c *cacheImpl) GetByID(projectID uint32) (*Project, error) {
return nil, ErrDisabledCache return nil, ErrDisabledCache
} }
start := time.Now() start := time.Now()
result, err := c.db.Redis.Get(fmt.Sprintf("project:id:%d", projectID)).Result() result, err := c.db.Redis.Get(context.Background(), fmt.Sprintf("project:id:%d", projectID)).Result()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -74,7 +75,7 @@ func (c *cacheImpl) GetByKey(projectKey string) (*Project, error) {
return nil, ErrDisabledCache return nil, ErrDisabledCache
} }
start := time.Now() start := time.Now()
result, err := c.db.Redis.Get(fmt.Sprintf("project:key:%s", projectKey)).Result() result, err := c.db.Redis.Get(context.Background(), fmt.Sprintf("project:key:%s", projectKey)).Result()
if err != nil { if err != nil {
return nil, err return nil, err
} }
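
Since go-redis v9, every command takes a context.Context as its first argument, which is why the cache's Set and Get calls gained context.Background(). A hedged sketch of the same project-cache reads and writes against a local Redis; the key names follow the snippet above, the address is an assumption:

```go
// Sketch of the v9-style calls used by the project cache: Set and Get take a
// context, and the 10-minute TTL is passed as before.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	if err := client.Set(ctx, "project:id:1", `{"projectId":1}`, 10*time.Minute).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := client.Get(ctx, "project:id:1").Result()
	if err != nil {
		fmt.Println("get failed:", err) // redis.Nil when the key is missing
		return
	}
	fmt.Println("cached project:", val)
}
```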

View file

@ -1,6 +1,7 @@
package redisstream package redisstream
import ( import (
"context"
"log" "log"
"net" "net"
"sort" "sort"
@ -8,8 +9,8 @@ import (
"strings" "strings"
"time" "time"
_redis "github.com/go-redis/redis"
"github.com/pkg/errors" "github.com/pkg/errors"
_redis "github.com/redis/go-redis/v9"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/queue/types"
@ -38,7 +39,7 @@ func NewConsumer(group string, streams []string, messageIterator messages.Messag
log.Fatalln(err) log.Fatalln(err)
} }
for _, stream := range streams { for _, stream := range streams {
err := redis.XGroupCreateMkStream(stream, group, "0").Err() err := redis.XGroupCreateMkStream(context.Background(), stream, group, "0").Err()
if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" { if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
log.Fatalln(err) log.Fatalln(err)
} }
@ -75,7 +76,7 @@ func (c *Consumer) Rebalanced() <-chan *types.PartitionsRebalancedEvent {
func (c *Consumer) ConsumeNext() error { func (c *Consumer) ConsumeNext() error {
// MBTODO: read in go routine, send messages to channel // MBTODO: read in go routine, send messages to channel
res, err := c.redis.XReadGroup(&_redis.XReadGroupArgs{ res, err := c.redis.XReadGroup(context.Background(), &_redis.XReadGroupArgs{
Group: c.group, Group: c.group,
Consumer: c.group, Consumer: c.group,
Streams: c.streams, Streams: c.streams,
@ -115,7 +116,7 @@ func (c *Consumer) ConsumeNext() error {
bID := ts<<13 | (idx & 0x1FFF) // Max: 4096 messages/ms for 69 years bID := ts<<13 | (idx & 0x1FFF) // Max: 4096 messages/ms for 69 years
c.messageIterator.Iterate([]byte(valueString), messages.NewBatchInfo(sessionID, r.Stream, bID, 0, int64(ts))) c.messageIterator.Iterate([]byte(valueString), messages.NewBatchInfo(sessionID, r.Stream, bID, 0, int64(ts)))
if c.autoCommit { if c.autoCommit {
if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil { if err = c.redis.XAck(context.Background(), r.Stream, c.group, m.ID).Err(); err != nil {
return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID) return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID)
} }
} else { } else {
@ -134,7 +135,7 @@ func (c *Consumer) Commit() error {
if len(idsInfo.id) == 0 { if len(idsInfo.id) == 0 {
continue continue
} }
if err := c.redis.XAck(stream, c.group, idsInfo.id...).Err(); err != nil { if err := c.redis.XAck(context.Background(), stream, c.group, idsInfo.id...).Err(); err != nil {
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err) return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
} }
c.idsPending[stream].id = nil c.idsPending[stream].id = nil
@ -156,7 +157,7 @@ func (c *Consumer) CommitBack(gap int64) error {
maxI := sort.Search(len(idsInfo.ts), func(i int) bool { maxI := sort.Search(len(idsInfo.ts), func(i int) bool {
return idsInfo.ts[i] > maxTs return idsInfo.ts[i] > maxTs
}) })
if err := c.redis.XAck(stream, c.group, idsInfo.id[:maxI]...).Err(); err != nil { if err := c.redis.XAck(context.Background(), stream, c.group, idsInfo.id[:maxI]...).Err(); err != nil {
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err) return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
} }
c.idsPending[stream].id = idsInfo.id[maxI:] c.idsPending[stream].id = idsInfo.id[maxI:]
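
The consumer follows the standard Redis Streams consumer-group loop: make sure the group exists with XGroupCreateMkStream, block on XReadGroup, then XAck processed IDs, either right away (auto-commit) or later in Commit/CommitBack. A condensed, hedged sketch of that loop with go-redis v9; the stream and group names are illustrative:

```go
// Condensed consumer-group loop, v9 style: create group, read, ack.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	stream, group := "messages", "workers"

	// Idempotent group creation; BUSYGROUP just means it already exists.
	if err := client.XGroupCreateMkStream(ctx, stream, group, "0").Err(); err != nil &&
		err.Error() != "BUSYGROUP Consumer Group name already exists" {
		fmt.Println("group create failed:", err)
		return
	}

	res, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
		Group:    group,
		Consumer: group,
		Streams:  []string{stream, ">"}, // ">" = only entries never delivered to this group
		Count:    10,
		Block:    2 * time.Second,
	}).Result()
	if err != nil {
		fmt.Println("nothing to read:", err)
		return
	}

	for _, r := range res {
		for _, m := range r.Messages {
			fmt.Println("got", m.ID, m.Values)
			// Acknowledge once processed; un-acked IDs stay in the pending list.
			if err := client.XAck(ctx, r.Stream, group, m.ID).Err(); err != nil {
				fmt.Println("ack failed:", err)
			}
		}
	}
}
```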

View file

@ -1,9 +1,11 @@
package redisstream package redisstream
import ( import (
"github.com/go-redis/redis" "context"
"log" "log"
"github.com/redis/go-redis/v9"
"openreplay/backend/pkg/env" "openreplay/backend/pkg/env"
) )
@ -30,10 +32,11 @@ func (p *Producer) Produce(topic string, key uint64, value []byte) error {
"sessionID": key, "sessionID": key,
"value": value, "value": value,
}, },
MaxLen: p.maxLenApprox,
} }
args.MaxLenApprox = p.maxLenApprox args.MaxLen = p.maxLenApprox
_, err := p.redis.XAdd(args).Result() _, err := p.redis.XAdd(context.Background(), args).Result()
if err != nil { if err != nil {
return err return err
} }
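
go-redis v9 removed the separate MaxLenApprox field, so the producer now sets MaxLen directly; the approximate ("~") trimming the old field implied can still be requested with Approx: true. A short sketch, with the stream name and values as assumptions:

```go
// Sketch: XAdd in go-redis v9. MaxLen plus Approx replaces the old MaxLenApprox.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	id, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "messages",
		MaxLen: 100000, // keep roughly the last 100k entries
		Approx: true,   // "~" trimming, the v9 equivalent of MaxLenApprox
		Values: map[string]interface{}{
			"sessionID": 12345,
			"value":     []byte("payload"),
		},
	}).Result()
	if err != nil {
		fmt.Println("xadd failed:", err)
		return
	}
	fmt.Println("appended entry", id)
}
```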

View file

@ -3,7 +3,8 @@ package redisstream
import ( import (
"regexp" "regexp"
"github.com/go-redis/redis" "github.com/docker/distribution/context"
"github.com/redis/go-redis/v9"
"openreplay/backend/pkg/env" "openreplay/backend/pkg/env"
) )
@ -28,7 +29,7 @@ func getRedisClient() (*redis.Client, error) {
} }
redisClient = redis.NewClient(options) redisClient = redis.NewClient(options)
if _, err := redisClient.Ping().Result(); err != nil { if _, err := redisClient.Ping(context.Background()).Result(); err != nil {
return nil, err return nil, err
} }
return redisClient, nil return redisClient, nil

View file

@ -13,7 +13,7 @@ func (e *routerImpl) health(w http.ResponseWriter, r *http.Request) {
func (e *routerImpl) healthMiddleware(next http.Handler) http.Handler { func (e *routerImpl) healthMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/" { if r.URL.Path == "/" || r.URL.Path == "/health" {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
return return
} }
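
The middleware now short-circuits both "/" and "/health" before the rest of the chain runs, so liveness probes never hit auth or logging. A minimal net/http sketch of the same idea; handler names and the port are illustrative:

```go
// Sketch: answer health probes before any other middleware or handler runs.
package main

import (
	"fmt"
	"net/http"
)

func healthMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" || r.URL.Path == "/health" {
			w.WriteHeader(http.StatusOK) // probes stop here
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "real handler")
	})
	http.ListenAndServe(":8080", healthMiddleware(api))
}
```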

View file

@ -135,11 +135,6 @@ func (e *handlersImpl) startSessionHandlerWeb(w http.ResponseWriter, r *http.Req
// Add tracker version to context // Add tracker version to context
r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion)) r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
if err := validateTrackerVersion(req.TrackerVersion); err != nil {
e.log.Error(r.Context(), "unsupported tracker version: %s, err: %s", req.TrackerVersion, err)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUpgradeRequired, errors.New("please upgrade the tracker version"), startTime, r.URL.Path, bodySize)
return
}
// Handler's logic // Handler's logic
if req.ProjectKey == nil { if req.ProjectKey == nil {
@ -162,6 +157,13 @@ func (e *handlersImpl) startSessionHandlerWeb(w http.ResponseWriter, r *http.Req
// Add projectID to context // Add projectID to context
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID))) r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
// Validate tracker version
if err := validateTrackerVersion(req.TrackerVersion); err != nil {
e.log.Error(r.Context(), "unsupported tracker version: %s, err: %s", req.TrackerVersion, err)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUpgradeRequired, errors.New("please upgrade the tracker version"), startTime, r.URL.Path, bodySize)
return
}
// Check if the project supports mobile sessions // Check if the project supports mobile sessions
if !p.IsWeb() { if !p.IsWeb() {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize) e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)

View file

@ -29,7 +29,7 @@ type Task struct {
Duration int Duration int
Status string Status string
Path string Path string
tx pool.Tx tx *pool.Tx
} }
func (t *Task) HasToTrim() bool { func (t *Task) HasToTrim() bool {
@ -65,7 +65,7 @@ func (t *tasksImpl) Get() (task *Task, err error) {
} }
}() }()
task = &Task{tx: pool.Tx{Tx: tx}} task = &Task{tx: tx}
sql := `SELECT spot_id, crop, duration FROM spots.tasks WHERE status = 'pending' ORDER BY added_time FOR UPDATE SKIP LOCKED LIMIT 1` sql := `SELECT spot_id, crop, duration FROM spots.tasks WHERE status = 'pending' ORDER BY added_time FOR UPDATE SKIP LOCKED LIMIT 1`
err = tx.TxQueryRow(sql).Scan(&task.SpotID, &task.Crop, &task.Duration) err = tx.TxQueryRow(sql).Scan(&task.SpotID, &task.Crop, &task.Duration)
if err != nil { if err != nil {
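
Get() claims one pending transcoding task inside an open transaction, and FOR UPDATE SKIP LOCKED keeps parallel workers from grabbing the same row; the transaction, now stored as *pool.Tx, stays open until the task finishes. A hedged sketch of that claim using plain pgx v5 instead of the internal pool wrapper; the connection string and the reduced column set are assumptions:

```go
// Sketch: claim one pending task; the row stays locked until Commit/Rollback.
package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()
	pool, err := pgxpool.New(ctx, "postgres://user:pass@localhost:5432/openreplay")
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer pool.Close()

	tx, err := pool.Begin(ctx)
	if err != nil {
		fmt.Println("begin:", err)
		return
	}
	defer tx.Rollback(ctx) // no-op after a successful Commit

	var spotID int64
	var duration int
	sql := `SELECT spot_id, duration FROM spots.tasks
	        WHERE status = 'pending'
	        ORDER BY added_time
	        LIMIT 1 FOR UPDATE SKIP LOCKED`
	if err := tx.QueryRow(ctx, sql).Scan(&spotID, &duration); err != nil {
		fmt.Println("no pending task:", err) // pgx.ErrNoRows when the queue is empty
		return
	}

	fmt.Println("claimed spot", spotID, "duration", duration)
	// ... transcode, update status, then:
	_ = tx.Commit(ctx)
}
```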

View file

@ -52,6 +52,7 @@ func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage
tasks: NewTasks(conn), tasks: NewTasks(conn),
streams: NewStreams(log, conn, objStorage), streams: NewStreams(log, conn, objStorage),
spots: spots, spots: spots,
metrics: metrics,
} }
tnsc.prepareWorkers = workers.NewPool(2, 4, tnsc.prepare) tnsc.prepareWorkers = workers.NewPool(2, 4, tnsc.prepare)
tnsc.transcodeWorkers = workers.NewPool(2, 4, tnsc.transcode) tnsc.transcodeWorkers = workers.NewPool(2, 4, tnsc.transcode)

View file

@ -8,7 +8,6 @@ ignore:
- "**/*/build/**" - "**/*/build/**"
- "**/*/.test.*" - "**/*/.test.*"
- "**/*/version.ts" - "**/*/version.ts"
review: comment:
poem: false layout: "condensed_header, condensed_files, condensed_footer"
review_status: false hide_project_coverage: TRUE
collapse_walkthrough: true

ee/api/.gitignore (vendored, 9 lines changed)
View file

@ -223,11 +223,14 @@ Pipfile.lock
/chalicelib/core/sessions/performance_event.py /chalicelib/core/sessions/performance_event.py
/chalicelib/core/sessions/sessions_viewed/sessions_viewed.py /chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
/chalicelib/core/sessions/unprocessed_sessions.py /chalicelib/core/sessions/unprocessed_sessions.py
/chalicelib/core/sessions/__init__.py
/chalicelib/core/sessions/sessions_legacy_mobil.py
/chalicelib/core/sessions/sessions_search_exp.py
/chalicelib/core/metrics/modules /chalicelib/core/metrics/modules
/chalicelib/core/socket_ios.py /chalicelib/core/socket_ios.py
/chalicelib/core/sourcemaps.py /chalicelib/core/sourcemaps
/chalicelib/core/sourcemaps_parser.py
/chalicelib/core/tags.py /chalicelib/core/tags.py
/chalicelib/core/product_analytics
/chalicelib/saml /chalicelib/saml
/chalicelib/utils/__init__.py /chalicelib/utils/__init__.py
/chalicelib/utils/args_transformer.py /chalicelib/utils/args_transformer.py
@ -290,3 +293,5 @@ Pipfile.lock
/chalicelib/core/errors/errors_ch.py /chalicelib/core/errors/errors_ch.py
/chalicelib/core/errors/errors_details.py /chalicelib/core/errors/errors_details.py
/chalicelib/utils/contextual_validators.py /chalicelib/utils/contextual_validators.py
/routers/subs/product_analytics.py
/schemas/product_analytics.py

View file

@ -6,25 +6,23 @@ name = "pypi"
[packages] [packages]
urllib3 = "==2.3.0" urllib3 = "==2.3.0"
requests = "==2.32.3" requests = "==2.32.3"
boto3 = "==1.36.12" boto3 = "==1.37.21"
pyjwt = "==2.10.1" pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10" psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"} psycopg = {extras = ["pool", "binary"], version = "==3.2.6"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.15" clickhouse-connect = "==0.8.15"
elasticsearch = "==8.17.1" elasticsearch = "==8.17.2"
jira = "==3.8.0" jira = "==3.8.0"
cachetools = "==5.5.1" cachetools = "==5.5.2"
fastapi = "==0.115.8" fastapi = "==0.115.12"
uvicorn = {extras = ["standard"], version = "==0.34.0"} uvicorn = {extras = ["standard"], version = "==0.34.0"}
gunicorn = "==23.0.0" gunicorn = "==23.0.0"
python-decouple = "==3.8" python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.6"} pydantic = {extras = ["email"], version = "==2.10.6"}
apscheduler = "==3.11.0" apscheduler = "==3.11.0"
python3-saml = "==1.16.0"
python-multipart = "==0.0.20" python-multipart = "==0.0.20"
redis = "==5.2.1" redis = "==5.2.1"
azure-storage-blob = "==12.24.1" azure-storage-blob = "==12.25.0"
[dev-packages] [dev-packages]

View file

@ -21,7 +21,7 @@ from chalicelib.utils import pg_client, ch_client
from crons import core_crons, ee_crons, core_dynamic_crons from crons import core_crons, ee_crons, core_dynamic_crons
from routers import core, core_dynamic from routers import core, core_dynamic
from routers import ee from routers import ee
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_analytics
from routers.subs import v1_api_ee from routers.subs import v1_api_ee
if config("ENABLE_SSO", cast=bool, default=True): if config("ENABLE_SSO", cast=bool, default=True):
@ -150,9 +150,9 @@ app.include_router(spot.public_app)
app.include_router(spot.app) app.include_router(spot.app)
app.include_router(spot.app_apikey) app.include_router(spot.app_apikey)
app.include_router(product_anaytics.public_app) app.include_router(product_analytics.public_app, prefix="/ap")
app.include_router(product_anaytics.app) app.include_router(product_analytics.app, prefix="/ap")
app.include_router(product_anaytics.app_apikey) app.include_router(product_analytics.app_apikey, prefix="/ap")
if config("ENABLE_SSO", cast=bool, default=True): if config("ENABLE_SSO", cast=bool, default=True):
app.include_router(saml.public_app) app.include_router(saml.public_app)

View file

@ -1,17 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
from . import sessions_pg
from . import sessions_pg as sessions_legacy
from . import sessions_ch
from . import sessions_search as sessions_search_legacy
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental sessions search")
from . import sessions_ch as sessions
from . import sessions_search_exp as sessions_search
else:
from . import sessions_pg as sessions
from . import sessions_search as sessions_search

View file

@ -927,12 +927,12 @@ def authenticate_sso(email: str, internal_id: str):
aud=AUDIENCE, jwt_jti=j_r.jwt_refresh_jti), aud=AUDIENCE, jwt_jti=j_r.jwt_refresh_jti),
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int), "refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int),
"spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], "spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'],
iat=j_r.spot_jwt_iat, aud=spot.AUDIENCE), iat=j_r.spot_jwt_iat, aud=spot.AUDIENCE, for_spot=True),
"spotRefreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'], "spotRefreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'],
tenant_id=r['tenantId'], tenant_id=r['tenantId'],
iat=j_r.spot_jwt_refresh_iat, iat=j_r.spot_jwt_refresh_iat,
aud=spot.AUDIENCE, aud=spot.AUDIENCE,
jwt_jti=j_r.spot_jwt_refresh_jti), jwt_jti=j_r.spot_jwt_refresh_jti, for_spot=True),
"spotRefreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) "spotRefreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int)
} }
return response return response

View file

@ -44,12 +44,15 @@ rm -rf ./chalicelib/core/sessions/sessions_search.py
rm -rf ./chalicelib/core/sessions/performance_event.py rm -rf ./chalicelib/core/sessions/performance_event.py
rm -rf ./chalicelib/core/sessions/sessions_viewed/sessions_viewed.py rm -rf ./chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
rm -rf ./chalicelib/core/sessions/unprocessed_sessions.py rm -rf ./chalicelib/core/sessions/unprocessed_sessions.py
rm -rf ./chalicelib/core/sessions/__init__.py
rm -rf ./chalicelib/core/sessions/sessions_legacy_mobil.py
rm -rf ./chalicelib/core/sessions/sessions_search_exp.py
rm -rf ./chalicelib/core/metrics/modules rm -rf ./chalicelib/core/metrics/modules
rm -rf ./chalicelib/core/socket_ios.py rm -rf ./chalicelib/core/socket_ios.py
rm -rf ./chalicelib/core/sourcemaps.py rm -rf ./chalicelib/core/sourcemaps
rm -rf ./chalicelib/core/sourcemaps_parser.py
rm -rf ./chalicelib/core/user_testing.py rm -rf ./chalicelib/core/user_testing.py
rm -rf ./chalicelib/core/tags.py rm -rf ./chalicelib/core/tags.py
rm -rf ./chalicelib/core/product_analytics
rm -rf ./chalicelib/saml rm -rf ./chalicelib/saml
rm -rf ./chalicelib/utils/__init__.py rm -rf ./chalicelib/utils/__init__.py
rm -rf ./chalicelib/utils/args_transformer.py rm -rf ./chalicelib/utils/args_transformer.py
@ -110,3 +113,5 @@ rm -rf ./chalicelib/core/errors/errors_pg.py
rm -rf ./chalicelib/core/errors/errors_ch.py rm -rf ./chalicelib/core/errors/errors_ch.py
rm -rf ./chalicelib/core/errors/errors_details.py rm -rf ./chalicelib/core/errors/errors_details.py
rm -rf ./chalicelib/utils/contextual_validators.py rm -rf ./chalicelib/utils/contextual_validators.py
rm -rf ./routers/subs/product_analytics.py
rm -rf ./schemas/product_analytics.py

View file

@ -1,19 +1,18 @@
urllib3==2.3.0 urllib3==2.3.0
requests==2.32.3 requests==2.32.3
boto3==1.36.12 boto3==1.37.21
pyjwt==2.10.1 pyjwt==2.10.1
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.4 psycopg[pool,binary]==3.2.6
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.15 clickhouse-connect==0.8.15
elasticsearch==8.17.1 elasticsearch==8.17.2
jira==3.8.0 jira==3.8.0
cachetools==5.5.1 cachetools==5.5.2
fastapi==0.115.8 fastapi==0.115.12
uvicorn[standard]==0.34.0 uvicorn[standard]==0.34.0
python-decouple==3.8 python-decouple==3.8
pydantic[email]==2.10.6 pydantic[email]==2.10.6
apscheduler==3.11.0 apscheduler==3.11.0
azure-storage-blob==12.24.1 azure-storage-blob==12.25.0

View file

@ -1,19 +1,18 @@
urllib3==2.3.0 urllib3==2.3.0
requests==2.32.3 requests==2.32.3
boto3==1.36.12 boto3==1.37.21
pyjwt==2.10.1 pyjwt==2.10.1
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.4 psycopg[pool,binary]==3.2.6
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.15 clickhouse-connect==0.8.15
elasticsearch==8.17.1 elasticsearch==8.17.2
jira==3.8.0 jira==3.8.0
cachetools==5.5.1 cachetools==5.5.2
fastapi==0.115.8 fastapi==0.115.12
python-decouple==3.8 python-decouple==3.8
pydantic[email]==2.10.6 pydantic[email]==2.10.6
apscheduler==3.11.0 apscheduler==3.11.0
redis==5.2.1 redis==5.2.1
azure-storage-blob==12.24.1 azure-storage-blob==12.25.0

View file

@ -1,16 +1,15 @@
urllib3==2.3.0 urllib3==2.3.0
requests==2.32.3 requests==2.32.3
boto3==1.36.12 boto3==1.37.21
pyjwt==2.10.1 pyjwt==2.10.1
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.4 psycopg[pool,binary]==3.2.6
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.15 clickhouse-connect==0.8.15
elasticsearch==8.17.1 elasticsearch==8.17.2
jira==3.8.0 jira==3.8.0
cachetools==5.5.1 cachetools==5.5.2
fastapi==0.115.8 fastapi==0.115.12
uvicorn[standard]==0.34.0 uvicorn[standard]==0.34.0
gunicorn==23.0.0 gunicorn==23.0.0
python-decouple==3.8 python-decouple==3.8
@ -19,10 +18,9 @@ apscheduler==3.11.0
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252 # TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml #--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
python3-saml==1.16.0
--no-binary=lxml
python-multipart==0.0.20 python-multipart==0.0.20
redis==5.2.1 redis==5.2.1
#confluent-kafka==2.1.0 #confluent-kafka==2.1.0
azure-storage-blob==12.24.1 azure-storage-blob==12.25.0

View file

@ -1,4 +1,5 @@
from .schemas import * from .schemas import *
from .schemas_ee import * from .schemas_ee import *
from .assist_stats_schema import * from .assist_stats_schema import *
from .product_analytics import *
from . import overrides as _overrides from . import overrides as _overrides

View file

@ -4,7 +4,7 @@ from pydantic import Field, EmailStr, field_validator, model_validator
from chalicelib.utils.TimeUTC import TimeUTC from chalicelib.utils.TimeUTC import TimeUTC
from . import schemas from . import schemas
from .overrides import BaseModel, Enum, ORUnion from .overrides import BaseModel, Enum
from .transformers_validators import remove_whitespace from .transformers_validators import remove_whitespace
@ -91,33 +91,6 @@ class TrailSearchPayloadSchema(schemas._PaginatedSchema):
return values return values
class SessionModel(BaseModel):
duration: int
errorsCount: int
eventsCount: int
favorite: bool = Field(default=False)
issueScore: int
issueTypes: List[schemas.IssueType] = Field(default=[])
metadata: dict = Field(default={})
pagesCount: int
platform: str
projectId: int
sessionId: str
startTs: int
timezone: Optional[str]
userAnonymousId: Optional[str]
userBrowser: str
userCity: str
userCountry: str
userDevice: Optional[str]
userDeviceType: str
userId: Optional[str]
userOs: str
userState: str
userUuid: str
viewed: bool = Field(default=False)
class AssistRecordUpdatePayloadSchema(BaseModel): class AssistRecordUpdatePayloadSchema(BaseModel):
name: str = Field(..., min_length=1) name: str = Field(..., min_length=1)
_transform_name = field_validator('name', mode="before")(remove_whitespace) _transform_name = field_validator('name', mode="before")(remove_whitespace)

ee/assist/.gitignore (vendored, 16 lines changed)
View file

@ -2,20 +2,4 @@
node_modules node_modules
npm-debug.log npm-debug.log
.cache .cache
test.html
build.sh
servers/peerjs-server.js
servers/sourcemaps-handler.js
servers/sourcemaps-server.js
/utils/geoIP.js
/utils/health.js
/utils/HeapSnapshot.js
/utils/helper.js
/utils/assistHelper.js
/utils/httpHandlers.js
/utils/socketHandlers.js
.local
*.mmdb *.mmdb

View file

@ -1,11 +1,8 @@
#ARCH can be amd64 or arm64
ARG ARCH=amd64 ARG ARCH=amd64
FROM --platform=linux/$ARCH node:23-alpine FROM --platform=linux/$ARCH node:23-alpine
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>" LABEL Maintainer="Zavorotynskiy Alexander <zavorotynskiy@pm.me>"
RUN apk add --no-cache tini git libc6-compat RUN apk add --no-cache tini git libc6-compat
# && ln -s /lib/libc.musl-x86_64.so.1 /lib/ld-linux-x86-64.so.2
ARG envarg ARG envarg
ENV ENTERPRISE_BUILD=${envarg} \ ENV ENTERPRISE_BUILD=${envarg} \
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \ MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \

ee/assist/app/assist.js (new file, 164 lines)
View file

@ -0,0 +1,164 @@
const jwt = require('jsonwebtoken');
const uaParser = require('ua-parser-js');
const {geoip} = require('./geoIP');
const {logger} = require('./logger');
let PROJECT_KEY_LENGTH = parseInt(process.env.PROJECT_KEY_LENGTH) || 20;
const IDENTITIES = {agent: 'agent', session: 'session'};
const EVENTS_DEFINITION = {
listen: {
UPDATE_EVENT: "UPDATE_SESSION", // tab become active/inactive, page title change, changed session object (rare case), call start/end
CONNECT_ERROR: "connect_error",
CONNECT_FAILED: "connect_failed",
ERROR: "error"
},
// The following events are emitted only by the server
server: {
UPDATE_SESSION: "SERVER_UPDATE_SESSION"
}
};
EVENTS_DEFINITION.emit = {
NEW_AGENT: "NEW_AGENT",
NO_AGENTS: "NO_AGENT",
AGENT_DISCONNECT: "AGENT_DISCONNECTED",
AGENTS_CONNECTED: "AGENTS_CONNECTED",
NO_SESSIONS: "SESSION_DISCONNECTED",
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
SESSION_RECONNECTED: "SESSION_RECONNECTED",
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
};
const BASE_sessionInfo = {
"pageTitle": "Page",
"active": false,
"sessionID": "0",
"metadata": {},
"userID": "",
"userUUID": "",
"projectKey": "",
"timestamp": 0,
"trackerVersion": "",
"userOs": "",
"userBrowser": "",
"userBrowserVersion": "",
"userDevice": "",
"userDeviceType": "",
"userCountry": "",
"userState": "",
"userCity": ""
};
const extractPeerId = (peerId) => {
const parts = peerId.split("-");
if (parts.length < 2 || parts.length > 3) {
logger.debug(`Invalid peerId format: ${peerId}`);
return {};
}
if (PROJECT_KEY_LENGTH > 0 && parts[0].length !== PROJECT_KEY_LENGTH) {
logger.debug(`Invalid project key length in peerId: ${peerId}`);
return {};
}
const [projectKey, sessionId, tabId = generateRandomTabId()] = parts;
return { projectKey, sessionId, tabId };
};
const generateRandomTabId = () => (Math.random() + 1).toString(36).substring(2);
function processPeerInfo(socket) {
socket._connectedAt = new Date();
const { projectKey, sessionId, tabId } = extractPeerId(socket.handshake.query.peerId || "");
Object.assign(socket.handshake.query, {
roomId: projectKey && sessionId ? `${projectKey}-${sessionId}` : null,
projectKey,
sessId: sessionId,
tabId
});
logger.debug(`Connection details: projectKey:${projectKey}, sessionId:${sessionId}, tabId:${tabId}, roomId:${socket.handshake.query.roomId}`);
}
/**
 * Extracts session info from the handshake and populates it onto the socket
 * @param {Socket} socket - the connecting socket
 */
const extractSessionInfo = function (socket) {
if (socket.handshake.query.sessionInfo !== undefined) {
logger.debug(`received headers: ${socket.handshake.headers}`);
socket.handshake.query.sessionInfo = JSON.parse(socket.handshake.query.sessionInfo);
socket.handshake.query.sessionInfo = {...BASE_sessionInfo, ...socket.handshake.query.sessionInfo};
let ua = uaParser(socket.handshake.headers['user-agent']);
socket.handshake.query.sessionInfo.userOs = ua.os.name || null;
socket.handshake.query.sessionInfo.userBrowser = ua.browser.name || null;
socket.handshake.query.sessionInfo.userBrowserVersion = ua.browser.version || null;
socket.handshake.query.sessionInfo.userDevice = ua.device.model || null;
socket.handshake.query.sessionInfo.userDeviceType = ua.device.type || 'desktop';
socket.handshake.query.sessionInfo.userCountry = null;
socket.handshake.query.sessionInfo.userState = null;
socket.handshake.query.sessionInfo.userCity = null;
if (geoip() !== null) {
logger.debug(`looking for location of ${socket.handshake.headers['x-forwarded-for'] || socket.handshake.address}`);
try {
let ip = socket.handshake.headers['x-forwarded-for'] || socket.handshake.address;
ip = ip.split(",")[0];
let info = geoip().city(ip);
socket.handshake.query.sessionInfo.userCountry = info.country.isoCode;
socket.handshake.query.sessionInfo.userCity = info.city.names.en;
socket.handshake.query.sessionInfo.userState = info.subdivisions.length > 0 ? info.subdivisions[0].names.en : null;
} catch (e) {
logger.debug(`geoip-country failed: ${e}`);
}
}
}
}
function errorHandler(listenerName, error) {
logger.error(`Error detected from ${listenerName}\n${error}`);
}
const JWT_TOKEN_PREFIX = "Bearer ";
function check(socket, next) {
if (socket.handshake.query.identity === IDENTITIES.session) {
return next();
}
if (socket.handshake.query.peerId && socket.handshake.auth && socket.handshake.auth.token) {
let token = socket.handshake.auth.token;
if (token.startsWith(JWT_TOKEN_PREFIX)) {
token = token.substring(JWT_TOKEN_PREFIX.length);
}
jwt.verify(token, process.env.ASSIST_JWT_SECRET, (err, decoded) => {
logger.debug(`JWT payload: ${decoded}`);
if (err) {
logger.debug(err);
return next(new Error('Authentication error'));
}
const {projectKey, sessionId} = extractPeerId(socket.handshake.query.peerId);
if (!projectKey || !sessionId) {
logger.debug(`Missing attribute: projectKey:${projectKey}, sessionId:${sessionId}`);
return next(new Error('Authentication error'));
}
if (String(projectKey) !== String(decoded.projectKey) || String(sessionId) !== String(decoded.sessionId)) {
logger.debug(`Trying to access projectKey:${projectKey} instead of ${decoded.projectKey} or
to sessionId:${sessionId} instead of ${decoded.sessionId}`);
return next(new Error('Authorization error'));
}
socket.decoded = decoded;
return next();
});
} else {
logger.debug(`something missing in handshake: ${socket.handshake}`);
return next(new Error('Authentication error'));
}
}
module.exports = {
processPeerInfo,
extractPeerId,
extractSessionInfo,
EVENTS_DEFINITION,
IDENTITIES,
errorHandler,
authorizer: {check}
};

ee/assist/app/cache.js (new file, 194 lines)
View file

@ -0,0 +1,194 @@
const {logger} = require('./logger');
const Redis = require("ioredis");
const crypto = require("crypto");
const { Mutex } = require("async-mutex");
const REDIS_URL = process.env.REDIS_URL || "localhost:6379";
const redisClient = new Redis(REDIS_URL);
redisClient.on("error", (error) => {
logger.error(`Redis cache error: ${error}`);
});
function generateNodeID() {
const buffer = crypto.randomBytes(8);
return "node_"+buffer.readBigUInt64BE(0).toString();
}
const batchSize = parseInt(process.env.REDIS_BATCH_SIZE) || 1000;
const PING_INTERVAL = parseInt(process.env.PING_INTERVAL_SECONDS) || 25;
const CACHE_REFRESH_INTERVAL = parseInt(process.env.CACHE_REFRESH_INTERVAL_SECONDS) || 5;
const pingInterval = Math.floor(PING_INTERVAL + PING_INTERVAL/2);
const cacheRefreshInterval = Math.floor(CACHE_REFRESH_INTERVAL * 4);
const cacheRefreshIntervalMs = CACHE_REFRESH_INTERVAL * 1000;
let lastCacheUpdateTime = 0;
let cacheRefresher = null;
const nodeID = process.env.HOSTNAME || generateNodeID();
const mutex = new Mutex();
const localCache = {
addedSessions: new Set(),
updatedSessions: new Set(),
refreshedSessions: new Set(),
deletedSessions: new Set()
};
const sendAssistEvent = async function (payload) {
try {
if (typeof payload !== "string") {
logger.warn("sendAssistEvent received non-string payload. Converting to string.");
payload = JSON.stringify(payload);
}
await redisClient.rpush("assist:stats", payload);
logger.debug("Assist event sent to Redis: " + payload);
} catch (error) {
logger.error(`Failed to send assist event to Redis: ${error}`);
}
};
const addSession = async function (sessionID) {
await mutex.runExclusive(() => {
localCache.addedSessions.add(sessionID);
});
}
const updateSession = async function (sessionID) {
await mutex.runExclusive(() => {
localCache.addedSessions.add(sessionID); // to update the session's cache
localCache.updatedSessions.add(sessionID); // to add sessionID to the list of recently updated sessions
});
}
const renewSession = async function (sessionID) {
await mutex.runExclusive(() => {
localCache.refreshedSessions.add(sessionID);
})
}
const removeSession = async function (sessionID) {
await mutex.runExclusive(() => {
localCache.deletedSessions.add(sessionID);
});
}
const updateNodeCache = async function (io) {
logger.debug('Background refresh triggered');
try {
const startTime = performance.now();
let currStepTs = performance.now();
const sessionIDs = new Set();
const result = await io.fetchSockets();
let toAdd = new Map();
let toUpdate = [];
let toRenew = [];
let toDelete = [];
await mutex.runExclusive(() => {
result.forEach((socket) => {
if (socket.handshake.query.sessId) {
const sessID = socket.handshake.query.sessId;
if (sessionIDs.has(sessID)) {
return;
}
sessionIDs.add(sessID);
if (localCache.addedSessions.has(sessID)) {
toAdd.set(sessID, socket.handshake.query.sessionInfo);
}
}
});
toUpdate = [...localCache.updatedSessions];
toRenew = [...localCache.refreshedSessions];
toDelete = [...localCache.deletedSessions];
// Clear the local cache
localCache.addedSessions.clear();
localCache.updatedSessions.clear();
localCache.refreshedSessions.clear();
localCache.deletedSessions.clear();
})
// insert new sessions in pipeline
const toAddArray = Array.from(toAdd.keys());
for (let i = 0; i < toAddArray.length; i += batchSize) {
const batch = toAddArray.slice(i, i + batchSize);
const pipeline = redisClient.pipeline();
for (const sessionID of batch) {
pipeline.set(`assist:online_sessions:${sessionID}`, JSON.stringify(toAdd.get(sessionID)), 'EX', pingInterval);
}
await pipeline.exec();
}
logger.info(`step 1 (toAdd) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toAddArray.length} sockets`);
currStepTs = performance.now();
// renew sessions in pipeline
for (let i = 0; i < toRenew.length; i += batchSize) {
const batch = toRenew.slice(i, i + batchSize);
const pipeline = redisClient.pipeline();
for (const sessionID of batch) {
pipeline.expire(`assist:online_sessions:${sessionID}`, pingInterval);
}
await pipeline.exec();
}
logger.info(`step 2 (toRenew) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toRenew.length} sockets`);
currStepTs = performance.now();
// delete sessions in pipeline
for (let i = 0; i < toDelete.length; i += batchSize) {
const batch = toDelete.slice(i, i + batchSize);
const pipeline = redisClient.pipeline();
for (const sessionID of batch) {
pipeline.del(`assist:online_sessions:${sessionID}`);
}
await pipeline.exec();
}
logger.info(`step 3 (toDelete) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toDelete.length} sockets`);
currStepTs = performance.now();
// add recently updated sessions
if (toUpdate.length > 0) {
await redisClient.sadd('assist:updated_sessions', toUpdate);
}
// store the node sessions
await redisClient.set(`assist:nodes:${nodeID}:sessions`, JSON.stringify(Array.from(sessionIDs)), 'EX', cacheRefreshInterval);
logger.info(`step 4 (full list + updated) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toUpdate.length} sockets`);
const duration = performance.now() - startTime;
logger.info(`Background refresh complete: ${duration.toFixed(2)}ms, ${result.length} sockets`);
} catch (error) {
logger.error(`Background refresh error: ${error}`);
}
}
let isFlushing = false;
function startCacheRefresher(io) {
if (cacheRefresher) clearInterval(cacheRefresher);
cacheRefresher = setInterval(async () => {
if (isFlushing) {
logger.warn("Skipping tick: flush in progress");
return;
}
const now = Date.now();
if (now - lastCacheUpdateTime < cacheRefreshIntervalMs) {
return;
}
isFlushing = true;
try {
await updateNodeCache(io);
lastCacheUpdateTime = Date.now();
} catch (err) {
logger.error(`Tick error: ${err}`);
} finally {
isFlushing = false;
}
}, cacheRefreshIntervalMs / 2);
}
module.exports = {
sendAssistEvent,
addSession,
updateSession,
renewSession,
removeSession,
startCacheRefresher,
}

ee/assist/app/geoIP.js (new file, 21 lines)
View file

@ -0,0 +1,21 @@
const geoip2Reader = require('@maxmind/geoip2-node').Reader;
const {logger} = require('./logger');
let geoip = null;
if (process.env.MAXMINDDB_FILE !== undefined) {
geoip2Reader.open(process.env.MAXMINDDB_FILE, {})
.then(reader => {
geoip = reader;
})
.catch(error => {
logger.error(`Error while opening the MAXMINDDB_FILE, err: ${error}`);
});
} else {
logger.error("!!! please provide a valid value for MAXMINDDB_FILE env var.");
}
module.exports = {
geoip: () => {
return geoip;
}
}

ee/assist/app/logger.js (new file, 23 lines)
View file

@ -0,0 +1,23 @@
const winston = require('winston');
const isDebugMode = process.env.debug === "1";
const logLevel = isDebugMode ? 'debug' : 'info';
const logger = winston.createLogger({
level: logLevel,
format: winston.format.combine(
winston.format.timestamp({
format: 'YYYY-MM-DD HH:mm:ss.SSS' // The same format as in backend services
}),
winston.format.errors({stack: true}),
winston.format.json()
),
defaultMeta: {service: process.env.SERVICE_NAME || 'assist'},
transports: [
new winston.transports.Console(),
],
});
module.exports = {
logger,
}

ee/assist/app/socket.js (new file, 266 lines)
View file

@ -0,0 +1,266 @@
const {
processPeerInfo,
IDENTITIES,
EVENTS_DEFINITION,
extractSessionInfo,
errorHandler
} = require("./assist");
const {
addSession,
updateSession,
renewSession,
removeSession
} = require('./cache');
const {
logger
} = require('./logger');
const {
startAssist,
endAssist,
handleEvent
} = require('./stats');
const deepMerge = require('@fastify/deepmerge')({all: true});
let io;
const setSocketIOServer = function (server) {
io = server;
}
function sendFrom(from, to, eventName, ...data) {
from.to(to).emit(eventName, ...data);
}
function sendTo(to, eventName, ...data) {
sendFrom(io, to, eventName, ...data);
}
const fetchSockets = async function (roomID) {
if (!io) {
return [];
}
try {
if (roomID) {
return await io.in(roomID).fetchSockets();
} else {
return await io.fetchSockets();
}
} catch (error) {
logger.error('Error fetching sockets:', error);
return [];
}
}
const findSessionSocketId = async (roomId, tabId) => {
let pickFirstSession = tabId === undefined;
const connected_sockets = await fetchSockets(roomId);
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
if (pickFirstSession) {
return socket.id;
} else if (socket.handshake.query.tabId === tabId) {
return socket.id;
}
}
}
return null;
};
async function getRoomData(roomID) {
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
const connected_sockets = await fetchSockets(roomID);
if (connected_sockets.length > 0) {
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
tabsCount++;
tabIDs.push(socket.handshake.query.tabId);
} else {
agentsCount++;
agentIDs.push(socket.id);
}
}
} else {
tabsCount = -1;
agentsCount = -1;
}
return {tabsCount, agentsCount, tabIDs, agentIDs};
}
async function onConnect(socket) {
logger.debug(`A new client:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`);
// Drop unknown socket.io connections
if (socket.handshake.query.identity === undefined || socket.handshake.query.peerId === undefined) {
logger.debug(`no identity or peerId, refusing connection`);
return socket.disconnect();
} else if (socket.handshake.query.identity === IDENTITIES.session && socket.handshake.query.sessionInfo === undefined) {
logger.debug(`sessionInfo is undefined, refusing connection`);
return socket.disconnect();
}
processPeerInfo(socket);
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);
if (socket.handshake.query.identity === IDENTITIES.session) {
// Check if a session with the same tabId is already connected; if so, refuse the new connection
if (tabsCount > 0) {
for (let tab of tabIDs) {
if (tab === socket.handshake.query.tabId) {
logger.debug(`session already connected, refusing new connection, peerId: ${socket.handshake.query.peerId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.SESSION_ALREADY_CONNECTED);
return socket.disconnect();
}
}
}
extractSessionInfo(socket);
if (tabsCount < 0) {
// New session creates new room
}
// Inform all connected agents about reconnected session
if (agentsCount > 0) {
logger.debug(`notifying new session about agent-existence`);
sendTo(socket.id, EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
}
} else if (tabsCount <= 0) {
logger.debug(`notifying new agent about no SESSIONS with peerId:${socket.handshake.query.peerId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
}
await socket.join(socket.handshake.query.roomId);
logger.debug(`${socket.id} joined room:${socket.handshake.query.roomId}, as:${socket.handshake.query.identity}, connections:${agentsCount + tabsCount + 1}`)
// Add session to cache
if (socket.handshake.query.identity === IDENTITIES.session) {
await addSession(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);
}
if (socket.handshake.query.identity === IDENTITIES.agent) {
if (socket.handshake.query.agentInfo !== undefined) {
socket.handshake.query.agentInfo = JSON.parse(socket.handshake.query.agentInfo);
socket.handshake.query.agentID = socket.handshake.query.agentInfo.id;
startAssist(socket, socket.handshake.query.agentID);
}
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
}
socket.conn.on("packet", (packet) => {
if (packet.type === 'pong') {
renewSession(socket.handshake.query.sessId);
}
});
// Set disconnect handler
socket.on('disconnect', () => onDisconnect(socket));
// Handle update event
socket.on(EVENTS_DEFINITION.listen.UPDATE_EVENT, (...args) => onUpdateEvent(socket, ...args));
// Handle webrtc events
socket.on(EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, (...args) => onWebrtcAgentHandler(socket, ...args));
// Handle errors
socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err));
socket.on(EVENTS_DEFINITION.listen.CONNECT_ERROR, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_ERROR, err));
socket.on(EVENTS_DEFINITION.listen.CONNECT_FAILED, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_FAILED, err));
// Handle all other events (usually DOM mutations and user actions)
socket.onAny((eventName, ...args) => onAny(socket, eventName, ...args));
}
async function onDisconnect(socket) {
logger.debug(`${socket.id} disconnected from ${socket.handshake.query.roomId}`);
if (socket.handshake.query.identity === IDENTITIES.agent) {
endAssist(socket, socket.handshake.query.agentID);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.AGENT_DISCONNECT, socket.id);
}
logger.debug("checking for number of connected agents and sessions");
let {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);
if (tabsCount <= 0) {
await removeSession(socket.handshake.query.sessId);
}
if (tabsCount === -1 && agentsCount === -1) {
logger.debug(`room not found: ${socket.handshake.query.roomId}`);
return;
}
if (tabsCount === 0) {
logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no SESSIONS`);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_SESSIONS);
}
if (agentsCount === 0) {
logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no AGENTS`);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_AGENTS);
}
}
async function onUpdateEvent(socket, ...args) {
logger.debug(`${socket.id} sent update event.`);
if (socket.handshake.query.identity !== IDENTITIES.session) {
logger.debug('Ignoring update event.');
return
}
args[0] = updateSessionData(socket, args[0])
socket.handshake.query.sessionInfo = deepMerge(socket.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
// update session cache
await updateSession(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);
// Update sessionInfo for all agents in the room
const connected_sockets = await fetchSockets(socket.handshake.query.roomId);
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
item.handshake.query.sessionInfo = deepMerge(item.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
} else if (item.handshake.query.identity === IDENTITIES.agent) {
sendFrom(socket, item.id, EVENTS_DEFINITION.emit.UPDATE_EVENT, args[0]);
}
}
}
async function onWebrtcAgentHandler(socket, ...args) {
if (socket.handshake.query.identity === IDENTITIES.agent) {
const agentIdToConnect = args[0]?.data?.toAgentId;
logger.debug(`${socket.id} sent webrtc event to agent:${agentIdToConnect}`);
if (agentIdToConnect && socket.handshake.sessionData.AGENTS_CONNECTED.includes(agentIdToConnect)) {
sendFrom(socket, agentIdToConnect, EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, args[0]);
}
}
}
async function onAny(socket, eventName, ...args) {
if (Object.values(EVENTS_DEFINITION.listen).indexOf(eventName) >= 0) {
logger.debug(`received event:${eventName}, should be handled by another listener, stopping onAny.`);
return
}
args[0] = updateSessionData(socket, args[0])
if (socket.handshake.query.identity === IDENTITIES.session) {
logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to room:${socket.handshake.query.roomId}`);
sendFrom(socket, socket.handshake.query.roomId, eventName, args[0]);
} else {
handleEvent(eventName, socket, args[0]);
logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to session of room:${socket.handshake.query.roomId}`);
let socketId = await findSessionSocketId(socket.handshake.query.roomId, args[0]?.meta?.tabId);
if (socketId === null) {
logger.debug(`session not found for:${socket.handshake.query.roomId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
} else {
logger.debug("message sent");
sendTo(socket.id, eventName, socket.id, args[0]);
}
}
}
// Backward compatibility (add a top layer with meta information)
function updateSessionData(socket, sessionData) {
if (sessionData?.meta === undefined && socket.handshake.query.identity === IDENTITIES.session) {
sessionData = {meta: {tabId: socket.handshake.query.tabId, version: 1}, data: sessionData};
}
return sessionData
}
module.exports = {
onConnect,
setSocketIOServer,
}

View file

@@ -1,6 +1,5 @@
-const statsHost = process.env.STATS_HOST || 'http://assist-stats-openreplay.app.svc.cluster.local:8000/events';
-const authToken = process.env.STATS_AUTH_TOKEN || '';
 const {logger} = require('./logger');
+const {sendAssistEvent} = require('./cache');
 class InMemoryCache {
     constructor() {
@@ -26,32 +25,10 @@ class InMemoryCache {
 const cache = new InMemoryCache();
-async function postData(payload) {
-    let headers = {
-        'Content-Type': 'application/json'
-    };
-    if (authToken && authToken.trim() !== '') {
-        headers['Authorization'] = 'Bearer ' + authToken;
-    }
-    const options = {
-        method: 'POST',
-        body: JSON.stringify(payload),
-        headers: headers,
-    }
-    try {
-        const response = await fetch(statsHost, options)
-        const jsonResponse = await response.json();
-        logger.debug('JSON response', JSON.stringify(jsonResponse, null, 4))
-    } catch(err) {
-        logger.debug('ERROR', err);
-    }
-}
 function startAssist(socket, agentID) {
     const tsNow = +new Date();
     const eventID = `${socket.handshake.query.sessId}_${agentID}_assist_${tsNow}`;
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -72,7 +49,7 @@ function endAssist(socket, agentID) {
         logger.debug(`have to skip assist_ended, no eventID in the cache, agentID: ${socket.handshake.query.agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}`);
         return
     }
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -90,7 +67,7 @@ function endAssist(socket, agentID) {
 function startCall(socket, agentID) {
     const tsNow = +new Date();
     const eventID = `${socket.handshake.query.sessId}_${agentID}_call_${tsNow}`;
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -112,7 +89,7 @@ function endCall(socket, agentID) {
         logger.debug(`have to skip s_call_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
         return
     }
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -129,7 +106,7 @@ function endCall(socket, agentID) {
 function startControl(socket, agentID) {
     const tsNow = +new Date();
     const eventID = `${socket.handshake.query.sessId}_${agentID}_control_${tsNow}`;
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -150,7 +127,7 @@ function endControl(socket, agentID) {
         logger.debug(`have to skip s_control_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
         return
     }
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -167,7 +144,7 @@ function endControl(socket, agentID) {
 function startRecord(socket, agentID) {
     const tsNow = +new Date();
     const eventID = `${socket.handshake.query.sessId}_${agentID}_record_${tsNow}`;
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,
@@ -184,7 +161,7 @@ function startRecord(socket, agentID) {
 function endRecord(socket, agentID) {
     const tsNow = +new Date();
     const eventID = cache.get(`${socket.sessId}_record`);
-    void postData({
+    void sendAssistEvent({
         "project_id": socket.handshake.query.projectId,
         "session_id": socket.handshake.query.sessId,
         "agent_id": agentID,

View file

@@ -1,14 +0,0 @@
rm -rf ./utils/assistHelper.js
rm -rf ./utils/geoIP.js
rm -rf ./utils/health.js
rm -rf ./utils/HeapSnapshot.js
rm -rf ./utils/helper.js
rm -rf ./utils/httpHandlers.js
rm -rf ./utils/logger.js
rm -rf ./utils/metrics.js
rm -rf ./utils/socketHandlers.js
rm -rf servers/peerjs-server.js
rm -rf servers/sourcemaps-handler.js
rm -rf servers/sourcemaps-server.js
rm -rf build.sh

File diff suppressed because it is too large

View file

@@ -1,33 +1,26 @@
 {
     "name": "assist-server",
-    "version": "v1.22.0-ee",
-    "description": "assist server to get live sessions & sourcemaps reader to get stack trace",
-    "main": "peerjs-server.js",
+    "version": "1.0.0",
+    "description": "",
+    "main": "index.js",
     "scripts": {
-        "test": "echo \"Error: no test specified\" && exit 1",
-        "start": "node server.js"
+        "test": "echo \"Error: no test specified\" && exit 1"
     },
-    "repository": {
-        "type": "git",
-        "url": "git+https://github.com/openreplay/openreplay.git"
-    },
-    "author": "KRAIEM Taha Yassine <tahayk2@gmail.com>",
-    "license": "Elastic License 2.0 (ELv2)",
-    "bugs": {
-        "url": "https://github.com/openreplay/openreplay/issues"
-    },
-    "homepage": "https://github.com/openreplay/openreplay#readme",
+    "keywords": [],
+    "author": "",
+    "license": "ISC",
     "dependencies": {
-        "@fastify/deepmerge": "^2.0.1",
-        "@maxmind/geoip2-node": "^4.2.0",
-        "@socket.io/redis-adapter": "^8.2.1",
-        "express": "^4.21.1",
+        "@fastify/deepmerge": "^3.0.0",
+        "@maxmind/geoip2-node": "^6.0.0",
+        "async-mutex": "^0.5.0",
+        "express": "^4.21.2",
+        "ioredis": "^5.6.1",
         "jsonwebtoken": "^9.0.2",
-        "prom-client": "^15.0.0",
-        "redis": "^4.6.10",
-        "socket.io": "^4.8.0",
-        "ua-parser-js": "^1.0.37",
+        "redis": "^4.7.0",
+        "socket.io": "^4.8.1",
+        "socket.io-client": "^4.8.1",
+        "ua-parser-js": "^2.0.3",
         "uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.51.0",
-        "winston": "^3.13.0"
+        "winston": "^3.17.0"
     }
 }

View file

@@ -1,2 +0,0 @@
#!/bin/bash
rsync -avr --exclude=".*" --exclude="node_modules" --ignore-existing ../../assist/* ./

View file

@@ -1,6 +0,0 @@
#!/bin/bash
set -a
source .env
set +a
npm start

View file

@@ -1,117 +1,64 @@
-const dumps = require('./utils/HeapSnapshot');
-const {request_logger} = require('./utils/helper');
-const express = require('express');
-const health = require("./utils/health");
-const assert = require('assert').strict;
-const register = require('./utils/metrics').register;
-let socket;
-if (process.env.redis === "true") {
-    socket = require("./servers/websocket-cluster");
-} else {
-    socket = require("./servers/websocket");
-}
-const {logger} = require('./utils/logger');
-health.healthApp.get('/metrics', async (req, res) => {
-    try {
-        res.set('Content-Type', register.contentType);
-        res.end(await register.metrics());
-    } catch (ex) {
-        res.status(500).end(ex);
-    }
-});
-const HOST = process.env.LISTEN_HOST || '0.0.0.0';
-const PORT = process.env.LISTEN_PORT || 9001;
-assert.ok(process.env.ASSIST_KEY, 'The "ASSIST_KEY" environment variable is required');
-const P_KEY = process.env.ASSIST_KEY;
-const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`;
-const heapdump = process.env.heapdump === "1";
-if (process.env.uws !== "true") {
-    let wsapp = express();
-    wsapp.use(express.json());
-    wsapp.use(express.urlencoded({extended: true}));
-    wsapp.use(request_logger("[wsapp]"));
-    wsapp.get(['/', PREFIX, `${PREFIX}/`, `${PREFIX}/${P_KEY}`, `${PREFIX}/${P_KEY}/`], (req, res) => {
-            res.statusCode = 200;
-            res.end("ok!");
-        }
-    );
-    heapdump && wsapp.use(`${PREFIX}/${P_KEY}/heapdump`, dumps.router);
-    wsapp.use(`${PREFIX}/${P_KEY}`, socket.wsRouter);
-    wsapp.enable('trust proxy');
-    const wsserver = wsapp.listen(PORT, HOST, () => {
-        logger.info(`WS App listening on http://${HOST}:${PORT}`);
-        health.healthApp.listen(health.PORT, HOST, health.listen_cb);
-    });
-    socket.start(wsserver);
-    module.exports = {wsserver};
-} else {
-    logger.info("Using uWebSocket");
-    const {App} = require("uWebSockets.js");
-    const uapp = new App();
-    const healthFn = (res, req) => {
-        res.writeStatus('200 OK').end('ok!');
-    }
-    uapp.get('/', healthFn);
-    uapp.get(PREFIX, healthFn);
-    uapp.get(`${PREFIX}/`, healthFn);
-    uapp.get(`${PREFIX}/${P_KEY}`, healthFn);
-    uapp.get(`${PREFIX}/${P_KEY}/`, healthFn);
-    /* Either onAborted or simply finished request */
-    const onAbortedOrFinishedResponse = function (res) {
-        if (res.id === -1) {
-            logger.debug("ERROR! onAbortedOrFinishedResponse called twice for the same res!");
-        } else {
-            logger.debug('Stream was closed');
-        }
-        /* Mark this response already accounted for */
-        res.id = -1;
-    }
-    const uWrapper = function (fn) {
-        return (res, req) => {
-            res.id = 1;
-            req.startTs = performance.now(); // track request's start timestamp
-            req.method = req.getMethod();
-            res.onAborted(() => {
-                onAbortedOrFinishedResponse(res);
-            });
-            return fn(req, res);
-        }
-    }
-    uapp.get(`${PREFIX}/${P_KEY}/sockets-list/:projectKey/autocomplete`, uWrapper(socket.handlers.autocomplete));
-    uapp.get(`${PREFIX}/${P_KEY}/sockets-list/:projectKey/:sessionId`, uWrapper(socket.handlers.socketsListByProject));
-    uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey/autocomplete`, uWrapper(socket.handlers.autocomplete));
-    uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey`, uWrapper(socket.handlers.socketsLiveByProject));
-    uapp.post(`${PREFIX}/${P_KEY}/sockets-live/:projectKey`, uWrapper(socket.handlers.socketsLiveByProject));
-    uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey/:sessionId`, uWrapper(socket.handlers.socketsLiveBySession));
-    socket.start(uapp);
-    uapp.listen(HOST, PORT, (token) => {
-        if (!token) {
-            logger.error("port already in use");
-        }
-        logger.info(`WS App listening on http://${HOST}:${PORT}`);
-        health.healthApp.listen(health.PORT, HOST, health.listen_cb);
-    });
-    process.on('uncaughtException', err => {
-        logger.error(`Uncaught Exception: ${err}`);
-    });
-    module.exports = {uapp};
-}
+const { App } = require('uWebSockets.js');
+const { Server } = require('socket.io');
+const { logger } = require("./app/logger");
+const { authorizer } = require("./app/assist");
+const { onConnect, setSocketIOServer } = require("./app/socket");
+const { startCacheRefresher } = require("./app/cache");
+const app = App();
+const pingInterval = parseInt(process.env.PING_INTERVAL) || 25000;
+const getCompressionConfig = function () {
+    // WS: The theoretical overhead per socket is 19KB (11KB for compressor and 8KB for decompressor)
+    let perMessageDeflate = false;
+    if (process.env.COMPRESSION === "true") {
+        logger.info(`WS compression: enabled`);
+        perMessageDeflate = {
+            zlibDeflateOptions: {
+                windowBits: 10,
+                memLevel: 1
+            },
+            zlibInflateOptions: {
+                windowBits: 10
+            }
+        }
+    } else {
+        logger.info(`WS compression: disabled`);
+    }
+    return {
+        perMessageDeflate: perMessageDeflate,
+        clientNoContextTakeover: true
+    };
+}
+const io = new Server({
+    maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
+    pingInterval: pingInterval, // Will use it for cache invalidation
+    cors: {
+        origin: "*", // Allow connections from any origin (for development)
+        methods: ["GET", "POST"],
+        credentials: true
+    },
+    path: '/socket',
+    ...getCompressionConfig()
+});
+io.use(async (socket, next) => await authorizer.check(socket, next));
+io.on('connection', (socket) => onConnect(socket));
+io.attachApp(app);
+setSocketIOServer(io);
+const HOST = process.env.LISTEN_HOST || '0.0.0.0';
+const PORT = parseInt(process.env.PORT) || 9001;
+app.listen(PORT, (token) => {
+    if (token) {
+        console.log(`Server running at http://${HOST}:${PORT}`);
+    } else {
+        console.log(`Failed to listen on port ${PORT}`);
+    }
+});
+startCacheRefresher(io);
+process.on('uncaughtException', err => {
+    logger.error(`Uncaught Exception: ${err}`);
+});

View file

@@ -1,64 +0,0 @@
const express = require('express');
const {
socketConnexionTimeout,
authorizer
} = require('../utils/assistHelper');
const {
createSocketIOServer
} = require('../utils/wsServer');
const {
onConnect
} = require('../utils/socketHandlers');
const {
socketsListByProject,
socketsLiveByProject,
socketsLiveBySession,
autocomplete
} = require('../utils/httpHandlers');
const {logger} = require('../utils/logger');
const {createAdapter} = require("@socket.io/redis-adapter");
const {createClient} = require("redis");
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
const pubClient = createClient({url: REDIS_URL});
const subClient = pubClient.duplicate();
logger.info(`Using Redis: ${REDIS_URL}`);
const wsRouter = express.Router();
wsRouter.get(`/sockets-list/:projectKey/autocomplete`, autocomplete); // autocomplete
wsRouter.get(`/sockets-list/:projectKey/:sessionId`, socketsListByProject); // is_live
wsRouter.get(`/sockets-live/:projectKey/autocomplete`, autocomplete); // not using
wsRouter.get(`/sockets-live/:projectKey`, socketsLiveByProject);
wsRouter.post(`/sockets-live/:projectKey`, socketsLiveByProject); // assist search
wsRouter.get(`/sockets-live/:projectKey/:sessionId`, socketsLiveBySession); // session_exists, get_live_session_by_id
let io;
module.exports = {
wsRouter,
start: (server, prefix) => {
io = createSocketIOServer(server, prefix);
io.use(async (socket, next) => await authorizer.check(socket, next));
io.on('connection', (socket) => onConnect(socket));
logger.info("WS server started");
socketConnexionTimeout(io);
Promise.all([pubClient.connect(), subClient.connect()])
.then(() => {
io.adapter(createAdapter(pubClient, subClient,
{requestsTimeout: process.env.REDIS_REQUESTS_TIMEOUT || 5000}));
logger.info("> redis connected.");
})
.catch((err) => {
logger.error(`redis connection error: ${err}`);
process.exit(2);
});
},
handlers: {
socketsListByProject,
socketsLiveByProject,
socketsLiveBySession,
autocomplete
}
};

View file

@@ -1,45 +0,0 @@
const express = require('express');
const {
socketConnexionTimeout,
authorizer
} = require('../utils/assistHelper');
const {
createSocketIOServer
} = require('../utils/wsServer');
const {
onConnect
} = require('../utils/socketHandlers');
const {
socketsListByProject,
socketsLiveByProject,
socketsLiveBySession,
autocomplete
} = require('../utils/httpHandlers');
const {logger} = require('../utils/logger');
const wsRouter = express.Router();
wsRouter.get(`/sockets-list/:projectKey/autocomplete`, autocomplete); // autocomplete
wsRouter.get(`/sockets-list/:projectKey/:sessionId`, socketsListByProject); // is_live
wsRouter.get(`/sockets-live/:projectKey/autocomplete`, autocomplete); // not using
wsRouter.get(`/sockets-live/:projectKey`, socketsLiveByProject);
wsRouter.post(`/sockets-live/:projectKey`, socketsLiveByProject); // assist search
wsRouter.get(`/sockets-live/:projectKey/:sessionId`, socketsLiveBySession); // session_exists, get_live_session_by_id
let io;
module.exports = {
wsRouter,
start: (server, prefix) => {
io = createSocketIOServer(server, prefix);
io.use(async (socket, next) => await authorizer.check(socket, next));
io.on('connection', (socket) => onConnect(socket));
logger.info("WS server started");
socketConnexionTimeout(io);
},
handlers: {
socketsListByProject,
socketsLiveByProject,
socketsLiveBySession,
autocomplete
}
};

View file

@@ -1,13 +0,0 @@
const {
extractProjectKeyFromRequest,
extractSessionIdFromRequest,
extractPayloadFromRequest,
getAvailableRooms
} = require('../utils/helper-ee');
module.exports = {
extractProjectKeyFromRequest,
extractSessionIdFromRequest,
extractPayloadFromRequest,
getAvailableRooms
}

View file

@@ -1,126 +0,0 @@
const uWS = require("uWebSockets.js");
const helper = require('./helper');
const {logger} = require('./logger');
const getBodyFromUWSResponse = async function (res) {
return new Promise(((resolve, reject) => {
let buffer;
res.onData((ab, isLast) => {
let chunk = Buffer.from(ab);
if (buffer) {
buffer = Buffer.concat([buffer, chunk]);
} else {
buffer = Buffer.concat([chunk]);
}
if (isLast) {
let json;
try {
json = JSON.parse(buffer);
} catch (e) {
console.error(e);
json = {};
}
resolve(json);
}
});
}));
}
const extractProjectKeyFromRequest = function (req) {
if (process.env.uws === "true") {
if (req.getParameter(0)) {
logger.debug(`[WS]where projectKey=${req.getParameter(0)}`);
return req.getParameter(0);
}
} else {
return helper.extractProjectKeyFromRequest(req);
}
return undefined;
}
const extractSessionIdFromRequest = function (req) {
if (process.env.uws === "true") {
if (req.getParameter(1)) {
logger.debug(`[WS]where projectKey=${req.getParameter(1)}`);
return req.getParameter(1);
}
} else {
return helper.extractSessionIdFromRequest(req);
}
return undefined;
}
const extractPayloadFromRequest = async function (req, res) {
let filters = {
"query": {},
"filter": {}
};
if (process.env.uws === "true") {
if (req.getQuery("q")) {
logger.debug(`[WS]where q=${req.getQuery("q")}`);
filters.query.value = req.getQuery("q");
}
if (req.getQuery("key")) {
logger.debug(`[WS]where key=${req.getQuery("key")}`);
filters.query.key = req.getQuery("key");
}
if (req.getQuery("userId")) {
logger.debug(`[WS]where userId=${req.getQuery("userId")}`);
filters.filter.userID = [req.getQuery("userId")];
}
if (!filters.query.value) {
let body = {};
if (req.getMethod() !== 'get') {
body = await getBodyFromUWSResponse(res);
}
filters = {
...filters,
"sort": {
"key": body.sort && body.sort.key ? body.sort.key : undefined,
"order": body.sort && body.sort.order === "DESC"
},
"pagination": {
"limit": body.pagination && body.pagination.limit ? body.pagination.limit : undefined,
"page": body.pagination && body.pagination.page ? body.pagination.page : undefined
}
}
filters.filter = {...filters.filter, ...(body.filter || {})};
}
} else {
return helper.extractPayloadFromRequest(req);
}
filters.filter = helper.objectToObjectOfArrays(filters.filter);
filters.filter = helper.transformFilters(filters.filter);
logger.debug("payload/filters:" + JSON.stringify(filters))
return Object.keys(filters).length > 0 ? filters : undefined;
}
const getAvailableRooms = async function (io) {
if (process.env.redis === "true") {
return io.of('/').adapter.allRooms();
} else {
return helper.getAvailableRooms(io);
}
}
const getCompressionConfig = function () {
if (process.env.uws !== "true") {
return helper.getCompressionConfig();
} else {
// uWS: The theoretical overhead per socket is 32KB (8KB for compressor and for 24KB decompressor)
if (process.env.COMPRESSION === "true") {
console.log(`uWS compression: enabled`);
return {
compression: uWS.DEDICATED_COMPRESSOR_8KB,
decompression: uWS.DEDICATED_DECOMPRESSOR_1KB
};
} else {
console.log(`uWS compression: disabled`);
return {};
}
}
}
module.exports = {
extractProjectKeyFromRequest,
extractSessionIdFromRequest,
extractPayloadFromRequest,
getCompressionConfig,
getAvailableRooms
};

View file

@@ -1,94 +0,0 @@
const _io = require("socket.io");
const {getCompressionConfig} = require("./helper");
const {logger} = require('./logger');
let io;
const getServer = function () {
return io;
}
let redisClient;
const useRedis = process.env.redis === "true";
if (useRedis) {
const {createClient} = require("redis");
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
redisClient = createClient({url: REDIS_URL});
redisClient.on("error", (error) => logger.error(`Redis error : ${error}`));
void redisClient.connect();
}
const processSocketsList = function (sockets) {
let res = []
for (let socket of sockets) {
let {handshake} = socket;
res.push({handshake});
}
return res
}
const doFetchAllSockets = async function () {
if (useRedis) {
try {
let cachedResult = await redisClient.get('fetchSocketsResult');
if (cachedResult) {
return JSON.parse(cachedResult);
}
let result = await io.fetchSockets();
let cachedString = JSON.stringify(processSocketsList(result));
await redisClient.set('fetchSocketsResult', cachedString, {EX: 5});
return result;
} catch (error) {
logger.error('Error setting value with expiration:', error);
}
}
return await io.fetchSockets();
}
const fetchSockets = async function (roomID) {
if (!io) {
return [];
}
if (!roomID) {
return await doFetchAllSockets();
}
return await io.in(roomID).fetchSockets();
}
const createSocketIOServer = function (server, prefix) {
if (io) {
return io;
}
if (process.env.uws !== "true") {
io = _io(server, {
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
cors: {
origin: "*",
methods: ["GET", "POST", "PUT"],
credentials: true
},
path: (prefix ? prefix : '') + '/socket',
...getCompressionConfig()
});
} else {
io = new _io.Server({
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
cors: {
origin: "*",
methods: ["GET", "POST", "PUT"],
credentials: true
},
path: (prefix ? prefix : '') + '/socket',
...getCompressionConfig()
});
io.attachApp(server);
}
return io;
}
module.exports = {
createSocketIOServer,
getServer,
fetchSockets,
}

View file

@@ -0,0 +1,60 @@
package main
import (
"context"
assistConfig "openreplay/backend/internal/config/assist"
"openreplay/backend/pkg/assist"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics"
databaseMetrics "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api"
)
func main() {
ctx := context.Background()
log := logger.New()
cfg := assistConfig.New(log)
// Observability
webMetrics := web.New("assist")
dbMetric := databaseMetrics.New("assist")
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
if cfg.AssistKey == "" {
log.Fatal(ctx, "assist key is not set")
}
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err)
}
defer pgConn.Close()
redisClient, err := redis.New(&cfg.Redis)
if err != nil {
log.Fatal(ctx, "can't init redis connection: %s", err)
}
defer redisClient.Close()
prefix := api.NoPrefix
builder, err := assist.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn, redisClient)
if err != nil {
log.Fatal(ctx, "can't init services: %s", err)
}
defer func() {
builder.AssistStats.Stop()
}()
router, err := api.NewRouter(&cfg.HTTP, log)
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(prefix, builder.AssistAPI)
router.AddMiddlewares(builder.RateLimiter.Middleware)
server.Run(ctx, log, &cfg.HTTP, router)
}

View file

@@ -0,0 +1,30 @@
package assist
import (
"time"
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"openreplay/backend/internal/config/redis"
"openreplay/backend/pkg/env"
"openreplay/backend/pkg/logger"
)
type Config struct {
common.Config
common.Postgres
redis.Redis
common.HTTP
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
AssistKey string `env:"ASSIST_KEY"`
CacheTTL time.Duration `env:"REDIS_CACHE_TTL,default=5s"`
BatchSize int `env:"REDIS_BATCH_SIZE,default=1000"`
ScanSize int64 `env:"REDIS_SCAN_SIZE,default=1000"`
WorkerID uint16
}
func New(log logger.Logger) *Config {
cfg := &Config{WorkerID: env.WorkerID()}
configurator.Process(log, cfg)
return cfg
}

View file

@@ -0,0 +1,207 @@
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
assistAPI "openreplay/backend/internal/config/assist"
"openreplay/backend/pkg/assist/service"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessionmanager"
)
type handlersImpl struct {
cfg *assistAPI.Config
log logger.Logger
responser *api.Responser
jsonSizeLimit int64
assist service.Assist
}
func NewHandlers(log logger.Logger, cfg *assistAPI.Config, responser *api.Responser, assist service.Assist) (api.Handlers, error) {
return &handlersImpl{
cfg: cfg,
log: log,
responser: responser,
jsonSizeLimit: cfg.JsonSizeLimit,
assist: assist,
}, nil
}
func (e *handlersImpl) GetAll() []*api.Description {
keyPrefix := "/assist"
if e.cfg.AssistKey != "" {
keyPrefix = fmt.Sprintf("/assist/%s", e.cfg.AssistKey)
}
return []*api.Description{
{keyPrefix + "/sockets-list/{projectKey}/autocomplete", e.autocomplete, "GET"}, // event search with live=true
{keyPrefix + "/sockets-list/{projectKey}/{sessionId}", e.socketsListByProject, "GET"}, // is_live for getReplay call
{keyPrefix + "/sockets-live/{projectKey}", e.socketsLiveByProject, "POST"}, // handler /{projectId}/assist/sessions for co-browser
{keyPrefix + "/sockets-live/{projectKey}/{sessionId}", e.socketsLiveBySession, "GET"}, // for get_live_session (with data) and for session_exists
{"/v1/ping", e.ping, "GET"},
}
}
func (e *handlersImpl) ping(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func getProjectKey(r *http.Request) (string, error) {
vars := mux.Vars(r)
key := vars["projectKey"]
if key == "" {
return "", fmt.Errorf("empty project key")
}
return key, nil
}
func getSessionID(r *http.Request) (string, error) {
vars := mux.Vars(r)
key := vars["sessionId"]
if key == "" {
return "", fmt.Errorf("empty session ID")
}
return key, nil
}
func getQuery(r *http.Request) (*service.Query, error) {
params := r.URL.Query()
q := &service.Query{
Key: params.Get("key"),
Value: params.Get("q"),
}
if q.Key == "" || q.Value == "" {
return nil, fmt.Errorf("empty key or value")
}
return q, nil
}
func (e *handlersImpl) autocomplete(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectKey, err := getProjectKey(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
query, err := getQuery(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
resp, err := e.assist.Autocomplete(projectKey, query)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
response := map[string]interface{}{
"data": resp,
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
}
func (e *handlersImpl) socketsListByProject(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectKey, err := getProjectKey(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
sessionID, err := getSessionID(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
resp, err := e.assist.GetByID(projectKey, sessionID)
if err != nil {
if errors.Is(err, sessionmanager.ErrSessionNotFound) {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
} else if errors.Is(err, sessionmanager.ErrSessionNotBelongToProject) {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
} else {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
}
return
}
response := map[string]interface{}{
"data": resp,
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
}
func (e *handlersImpl) socketsLiveByProject(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectKey, err := getProjectKey(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
e.log.Debug(context.Background(), "bodyBytes: %s", bodyBytes)
bodySize = len(bodyBytes)
req := &service.Request{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
resp, err := e.assist.GetAll(projectKey, req)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
response := map[string]interface{}{
"data": resp,
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
}
func (e *handlersImpl) socketsLiveBySession(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
projectKey, err := getProjectKey(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
sessionID, err := getSessionID(r)
if err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
resp, err := e.assist.GetByID(projectKey, sessionID)
if err != nil {
if errors.Is(err, sessionmanager.ErrSessionNotFound) {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
} else if errors.Is(err, sessionmanager.ErrSessionNotBelongToProject) {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
} else {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
}
return
}
response := map[string]interface{}{
"data": resp,
}
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
}
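A hedged sketch (not part of the diff) of calling these routes from Node 18+ with the built-in fetch. The paths and the key/q query parameters come from GetAll() and getQuery() above; the host, the ASSIST_KEY value, the project key, and the userId filter key are illustrative guesses.
// Illustrative only (not in the commit): querying the Go assist API.
const BASE = 'http://assist.example.com';   // hypothetical host
const ASSIST_KEY = 'secret-assist-key';     // must match the service's ASSIST_KEY env var
const projectKey = 'myProjectKey';

async function main() {
    // Autocomplete: getQuery() requires both `key` and `q`.
    const ac = await fetch(`${BASE}/assist/${ASSIST_KEY}/sockets-list/${projectKey}/autocomplete?key=userId&q=ali`);
    console.log(await ac.json()); // -> { "data": ... }

    // Session lookup: 404 / 403 are mapped from the session manager errors.
    const byId = await fetch(`${BASE}/assist/${ASSIST_KEY}/sockets-list/${projectKey}/1234567890`);
    console.log(byId.status); // 200, 403 or 404
}

main();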

View file

@@ -0,0 +1,48 @@
package assist
import (
"time"
"openreplay/backend/internal/config/assist"
assistAPI "openreplay/backend/pkg/assist/api"
"openreplay/backend/pkg/assist/service"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/limiter"
"openreplay/backend/pkg/sessionmanager"
)
type ServicesBuilder struct {
RateLimiter *limiter.UserRateLimiter
AssistAPI api.Handlers
AssistStats service.AssistStats
}
func NewServiceBuilder(log logger.Logger, cfg *assist.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projectsManager := projects.New(log, pgconn, redis, dbMetrics)
sessManager, err := sessionmanager.New(log, cfg, redis.Redis)
if err != nil {
return nil, err
}
sessManager.Start()
assistStats, err := service.NewAssistStats(log, pgconn, redis.Redis)
if err != nil {
return nil, err
}
assistManager := service.NewAssist(log, pgconn, projectsManager, sessManager)
responser := api.NewResponser(webMetrics)
handlers, err := assistAPI.NewHandlers(log, cfg, responser, assistManager)
if err != nil {
return nil, err
}
return &ServicesBuilder{
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AssistAPI: handlers,
AssistStats: assistStats,
}, nil
}

View file

@@ -0,0 +1,119 @@
package service
import (
"fmt"
"strconv"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/sessionmanager"
)
type assistImpl struct {
log logger.Logger
pgconn pool.Pool
projects projects.Projects
sessions sessionmanager.SessionManager
}
type Assist interface {
Autocomplete(projectKey string, query *Query) (interface{}, error)
IsLive(projectKey, sessionID string) (bool, error)
GetAll(projectKey string, filters *Request) (interface{}, error)
GetByID(projectKey, sessionID string) (interface{}, error)
}
func NewAssist(log logger.Logger, pgconn pool.Pool, projects projects.Projects, sessions sessionmanager.SessionManager) Assist {
return &assistImpl{
log: log,
pgconn: pgconn,
projects: projects,
sessions: sessions,
}
}
func (a *assistImpl) Autocomplete(projectKey string, query *Query) (interface{}, error) {
switch {
case projectKey == "":
return nil, fmt.Errorf("project key is required")
case query == nil:
return nil, fmt.Errorf("query is required")
case query.Key == "":
return nil, fmt.Errorf("query key is required")
case query.Value == "":
return nil, fmt.Errorf("query value is required")
}
project, err := a.projects.GetProjectByKey(projectKey)
if err != nil {
return nil, fmt.Errorf("failed to get project by key: %s", err)
}
return a.sessions.Autocomplete(strconv.Itoa(int(project.ProjectID)), sessionmanager.FilterType(query.Key), query.Value)
}
func (a *assistImpl) IsLive(projectKey, sessionID string) (bool, error) {
switch {
case projectKey == "":
return false, fmt.Errorf("project key is required")
case sessionID == "":
return false, fmt.Errorf("session ID is required")
}
project, err := a.projects.GetProjectByKey(projectKey)
if err != nil {
return false, fmt.Errorf("failed to get project by key: %s", err)
}
sess, err := a.sessions.GetByID(strconv.Itoa(int(project.ProjectID)), sessionID)
if err != nil {
return false, fmt.Errorf("failed to get session by ID: %s", err)
}
return sess != nil, nil
}
func (a *assistImpl) GetAll(projectKey string, request *Request) (interface{}, error) {
switch {
case projectKey == "":
return nil, fmt.Errorf("project key is required")
case request == nil:
return nil, fmt.Errorf("filters are required")
}
project, err := a.projects.GetProjectByKey(projectKey)
if err != nil {
return nil, fmt.Errorf("failed to get project by key: %s", err)
}
order := sessionmanager.Asc
if request.Sort.Order == "DESC" {
order = sessionmanager.Desc
}
filters := make([]*sessionmanager.Filter, 0, len(request.Filters))
for name, f := range request.Filters {
filters = append(filters, &sessionmanager.Filter{
Type: sessionmanager.FilterType(name),
Value: f.Value,
Operator: f.Operator == "is",
})
}
sessions, total, counter, err := a.sessions.GetAll(strconv.Itoa(int(project.ProjectID)), filters, order, request.Pagination.Page, request.Pagination.Limit)
if err != nil {
return nil, fmt.Errorf("failed to get sessions: %s", err)
}
resp := map[string]interface{}{
"total": total,
"counter": counter,
"sessions": sessions,
}
return resp, nil
}
func (a *assistImpl) GetByID(projectKey, sessionID string) (interface{}, error) {
switch {
case projectKey == "":
return nil, fmt.Errorf("project key is required")
case sessionID == "":
return nil, fmt.Errorf("session ID is required")
}
project, err := a.projects.GetProjectByKey(projectKey)
if err != nil {
return nil, fmt.Errorf("failed to get project by key: %s", err)
}
return a.sessions.GetByID(strconv.Itoa(int(project.ProjectID)), sessionID)
}

View file

@@ -0,0 +1,27 @@
package service
type Query struct {
Key string
Value string
}
type Filter struct {
Value []string `json:"values"`
Operator string `json:"operator"` // is|contains
}
type Pagination struct {
Limit int `json:"limit"`
Page int `json:"page"`
}
type Sort struct {
Key string `json:"key"` // useless
Order string `json:"order"` // [ASC|DESC]
}
type Request struct {
Filters map[string]Filter `json:"filter"`
Pagination Pagination `json:"pagination"`
Sort Sort `json:"sort"`
}
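The JSON tags above define the search body accepted by the sockets-live POST handler. A minimal sketch (not part of the diff) of a matching request from Node follows; the filter name userId, the host, and the key are guesses, while the field names (filter, values, operator, pagination, sort) and the {total, counter, sessions} response shape come from this package.
// Illustrative only (not in the commit): a search body shaped to the Go Request struct (Node 18+ fetch assumed).
const body = {
    filter: {
        userId: { values: ['demo-user'], operator: 'is' } // operator: "is" | "contains"
    },
    pagination: { limit: 10, page: 1 },
    sort: { key: 'timestamp', order: 'DESC' }             // order: ASC | DESC
};

fetch('http://assist.example.com/assist/secret-assist-key/sockets-live/myProjectKey', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
}).then(r => r.json()).then(resp => console.log(resp.data)); // { total, counter, sessions }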

View file

@@ -0,0 +1,126 @@
package service
import (
"context"
"encoding/json"
"errors"
"time"
"github.com/redis/go-redis/v9"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
type assistStatsImpl struct {
log logger.Logger
pgClient pool.Pool
redisClient *redis.Client
ticker *time.Ticker
stopChan chan struct{}
}
type AssistStats interface {
Stop()
}
func NewAssistStats(log logger.Logger, pgClient pool.Pool, redisClient *redis.Client) (AssistStats, error) {
switch {
case log == nil:
return nil, errors.New("logger is empty")
case pgClient == nil:
return nil, errors.New("pg client is empty")
case redisClient == nil:
return nil, errors.New("redis client is empty")
}
stats := &assistStatsImpl{
log: log,
pgClient: pgClient,
redisClient: redisClient,
ticker: time.NewTicker(time.Minute),
stopChan: make(chan struct{}),
}
stats.init()
return stats, nil
}
func (as *assistStatsImpl) init() {
as.log.Debug(context.Background(), "Starting assist stats")
go func() {
for {
select {
case <-as.ticker.C:
as.loadData()
case <-as.stopChan:
as.log.Debug(context.Background(), "Stopping assist stats")
return
}
}
}()
}
type AssistStatsEvent struct {
ProjectID uint32 `json:"project_id"`
SessionID string `json:"session_id"`
AgentID string `json:"agent_id"`
EventID string `json:"event_id"`
EventType string `json:"event_type"`
EventState string `json:"event_state"`
Timestamp int64 `json:"timestamp"`
}
func (as *assistStatsImpl) loadData() {
ctx := context.Background()
events, err := as.redisClient.LPopCount(ctx, "assist:stats", 1000).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
as.log.Debug(ctx, "No data to load from redis")
} else {
as.log.Error(ctx, "Failed to load data from redis: ", err)
}
return
}
if len(events) == 0 {
as.log.Debug(ctx, "No data to load from redis")
return
}
as.log.Debug(ctx, "Loaded %d events from redis", len(events))
for _, event := range events {
e := &AssistStatsEvent{}
err := json.Unmarshal([]byte(event), &e)
if err != nil {
as.log.Error(ctx, "Failed to unmarshal event: ", err)
continue
}
switch e.EventType {
case "start":
err = as.insertEvent(e)
case "end":
err = as.updateEvent(e)
default:
as.log.Warn(ctx, "Unknown event type: %s", e.EventType)
}
if err != nil {
as.log.Error(ctx, "Failed to process event: ", err)
continue
}
}
}
func (as *assistStatsImpl) insertEvent(event *AssistStatsEvent) error {
insertQuery := `INSERT INTO assist_events (event_id, project_id, session_id, agent_id, event_type, timestamp) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (event_id) DO NOTHING`
return as.pgClient.Exec(insertQuery, event.EventID, event.ProjectID, event.SessionID, event.AgentID, event.EventType, event.Timestamp)
}
func (as *assistStatsImpl) updateEvent(event *AssistStatsEvent) error {
updateQuery := `UPDATE assist_events SET duration = $1 - timestamp WHERE event_id = $2`
return as.pgClient.Exec(updateQuery, event.Timestamp, event.EventID)
}
func (as *assistStatsImpl) Stop() {
close(as.stopChan)
as.ticker.Stop()
}
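The worker above pops JSON events from the assist:stats Redis list. The producer side is the Node app's sendAssistEvent (exported from ee/assist/app/cache.js, whose body is not shown in this diff), so the following is a hedged sketch of what a push could look like: the RPUSH command, the ioredis client, and the helper name are assumptions; the field names mirror the AssistStatsEvent JSON tags.
// Illustrative only (not in the commit): pushing an event onto the list the Go worker consumes.
const Redis = require('ioredis');
const redis = new Redis(process.env.REDIS_URL || 'redis://localhost:6379');

async function pushAssistEvent(event) {
    await redis.rpush('assist:stats', JSON.stringify({
        project_id: event.projectId,   // numeric in the Go struct
        session_id: event.sessionId,
        agent_id: event.agentId,
        event_id: event.eventId,
        event_type: event.eventType,   // the Go worker switches on "start" / "end"
        event_state: event.eventState, // semantics defined by the Node stats module
        timestamp: Date.now(),
    }));
}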

View file

@@ -4,7 +4,8 @@ import (
 	"errors"
 	"strings"

-	"github.com/go-redis/redis"
+	"github.com/docker/distribution/context"
+	"github.com/redis/go-redis/v9"

 	config "openreplay/backend/internal/config/redis"
 )
@@ -33,7 +34,7 @@ func New(cfg *config.Redis) (*Client, error) {
 		return nil, err
 	}
 	client := redis.NewClient(options)
-	if _, err := client.Ping().Result(); err != nil {
+	if _, err := client.Ping(context.Background()).Result(); err != nil {
 		return nil, err
 	}
 	return &Client{

Some files were not shown because too many files have changed in this diff.