Compare commits


155 commits

Author SHA1 Message Date
nick-delirium
3ee33391bc
fix message reader 2024-07-09 10:51:33 +02:00
rjshrjndrn
d215161565 chore(helm): Updating frontend image release 2024-05-30 12:40:38 +02:00
Shekar Siri
819616267a fix(ui): remove starting underscore from the metada which were added to avoid coflicting with existing filter keys 2024-05-30 11:40:38 +02:00
rjshrjndrn
82131627af chore(db): Patch image
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-29 18:45:38 +02:00
Alexander
4c12c9ed53 feat(backend): put path+query parts into url_path column 2024-05-29 18:40:38 +02:00
Mehdi Osman
1cc498807d
Increment frontend chart version (#2223)
Co-authored-by: GitHub Action <action@github.com>
2024-05-29 18:04:08 +02:00
Delirium
0edc130db0
fix ui: jumpto param fix (#2222) 2024-05-29 17:55:48 +02:00
Mehdi Osman
fc995408a6
Increment frontend chart version (#2220)
Co-authored-by: GitHub Action <action@github.com>
2024-05-29 12:40:18 +02:00
Delirium
ec666fb23f
fix ui: fix jump to time func (#2218) 2024-05-29 12:33:56 +02:00
rjshrjndrn
15442db2a8 fix(assist): remove hardcoded arch
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-28 18:15:35 +02:00
Mehdi Osman
37f957a0b6
Increment assist chart version (#2213)
Co-authored-by: GitHub Action <action@github.com>
2024-05-27 18:34:33 +02:00
rjshrjndrn
a97b23a9a3 chore(build): Fix build arm
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-27 18:27:54 +02:00
Mehdi Osman
9f0bc868ed
Increment assist chart version (#2210)
Co-authored-by: GitHub Action <action@github.com>
2024-05-27 16:25:07 +02:00
Alexander
80af24f91a
feat(assist): count a search filters for full match results only (#2209) 2024-05-27 16:00:46 +02:00
rjshrjndrn
3a943d8396 chore(helm): Adding connector chart
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-24 12:10:58 +02:00
Alexander
a6fdd478e1 feat(connector): fields order fix 2024-05-23 18:44:35 +02:00
Alexander
5f021e48b7
feat(connector): added new events + fixed schema naming (#2201) 2024-05-23 12:34:42 +02:00
rjshrjndrn
013e492020 fix(actions): Always sort tags using name
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-23 11:26:12 +02:00
rjshrjndrn
56656fd567 chore(cli): Exposing OR backups systemwide
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-23 11:09:19 +02:00
Alexander
336a745db9 feat(connector): removed elastic option 2024-05-22 16:47:13 +02:00
Alexander
22ff45f058 feat(connector): fixed mobile naming 2024-05-22 16:35:39 +02:00
Alexander
7a7fd44c3b
New S3 connector (#2199)
* feat(connector): added s3 connector + small improvements

* feat(connector): added missing import
2024-05-22 15:58:13 +02:00
Mehdi Osman
0507a00acf
Increment chalice chart version (#2193)
Co-authored-by: GitHub Action <action@github.com>
2024-05-15 20:44:55 +02:00
Kraiem Taha Yassine
5b5df8bf32
fix(chalice): fixed funnels (#2192)
fix(chalice): fixed permissions
2024-05-15 20:39:25 +02:00
Mehdi Osman
b68750caeb
Increment frontend chart version (#2189)
Co-authored-by: GitHub Action <action@github.com>
2024-05-10 18:38:31 +02:00
Shekar Siri
fe158e7ba5
Change UI minor (#2188)
* change(ui): pagination input width

* fix ui: fix usability test description check

---------

Co-authored-by: nick-delirium <nikita@openreplay.com>
2024-05-10 18:30:34 +02:00
Alexander
2401449a4a
feat(assest): use original content-encoding for uploaded to s3/azure assets (#2178) 2024-05-07 18:13:25 +02:00
Alexander
f68aff93d2
feat(peers): disabled peers debug level (#2176) 2024-05-07 15:55:14 +02:00
Mehdi Osman
406342dcf9
Increment frontend chart version (#2172)
Co-authored-by: GitHub Action <action@github.com>
2024-05-06 14:39:31 +02:00
Delirium
a7b34c41be
fix ui: fix EFS indexes check for reader (#2171) 2024-05-06 14:34:39 +02:00
rjshrjndrn
24f4bf6414 chore(helm): Remove hardcoded ingress host
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-06 12:17:42 +02:00
rjshrjndrn
e35a284b1a fix(helm): allow ingress class to be overriden
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-05-03 23:48:30 +02:00
Mehdi Osman
6d7d7bae09
Increment chalice chart version (#2167)
Co-authored-by: GitHub Action <action@github.com>
2024-05-03 18:52:14 +02:00
Kraiem Taha Yassine
c3f07b5f05
fix(chalice): fixed insights with platform filter (#2166) 2024-05-03 18:46:19 +02:00
Mehdi Osman
33526498dd
Increment chalice chart version (#2165)
Co-authored-by: GitHub Action <action@github.com>
2024-05-03 18:12:17 +02:00
Kraiem Taha Yassine
21324125c7
fix(chalice): fixed table of error with platform filter for EE-PG (#2164) 2024-05-03 18:07:36 +02:00
Mehdi Osman
525a43eb29
Increment chalice chart version (#2162)
Co-authored-by: GitHub Action <action@github.com>
2024-05-02 18:27:26 +02:00
Kraiem Taha Yassine
0295fb26cb
fix(chalice): fixed table of error with platform filter (#2161) 2024-05-02 18:18:47 +02:00
Mehdi Osman
7090bfc06e
Increment chalice chart version (#2160)
Co-authored-by: GitHub Action <action@github.com>
2024-05-02 16:48:42 +02:00
Kraiem Taha Yassine
11850cd27f
Revert "fix(chalice): changed SSO (#2157)" (#2159)
This reverts commit 28df42132f.
2024-05-02 16:42:17 +02:00
Mehdi Osman
09739bcb01
Increment chalice chart version (#2158)
Co-authored-by: GitHub Action <action@github.com>
2024-05-02 16:01:38 +02:00
Kraiem Taha Yassine
28df42132f
fix(chalice): changed SSO (#2157) 2024-05-02 15:52:25 +02:00
Mehdi Osman
acf2ea7a8d
Increment chalice chart version (#2156)
Co-authored-by: GitHub Action <action@github.com>
2024-05-02 14:14:09 +02:00
Kraiem Taha Yassine
046e850a65
fix(chalice): changed SSO and added logs (#2155) 2024-05-02 14:02:04 +02:00
Alexander
7eb3e208fb
feat(assist): added filter's counter (#2153) 2024-04-30 15:56:43 +02:00
Mehdi Osman
372caf6c7c
Increment storage chart version (#2133)
Co-authored-by: GitHub Action <action@github.com>
2024-04-24 17:39:12 +02:00
Alexander
6f3a440511
feat(backend): use zstd as a default compression algo (#2132) 2024-04-24 17:12:29 +02:00
Alexander
fcc0195528
feat(backend): added projects filter to connector logic (#2130) 2024-04-24 16:45:52 +02:00
Kraiem Taha Yassine
905953f899
fix(dependencies): added missing dependency (#2129) 2024-04-24 16:44:38 +02:00
Alexander
ec09a6de5b
fix(backend): fixed connection issue (#2125) 2024-04-24 13:09:24 +02:00
Alexander
dfd2b3a6de
fix(backend): added missing ch import (#2123) 2024-04-23 16:25:16 +02:00
Alexander
5022688c5f
feat(backend): fixed imports issue (#2120) 2024-04-23 10:16:54 +02:00
Alexander
22a3dc9f8e
feat(backend): patched redshift connector (#2117) 2024-04-22 13:34:23 +02:00
Mehdi Osman
5a6969e1eb
Increment frontend chart version (#2114)
Co-authored-by: GitHub Action <action@github.com>
2024-04-19 18:21:01 +02:00
Shekar Siri
1fe2e5afe3
fix(ui): include credentials for fetch request to address the refresh token issue (#2113) 2024-04-19 18:16:19 +02:00
Mehdi Osman
e8ccfbb2b3
Increment chalice chart version (#2106)
Co-authored-by: GitHub Action <action@github.com>
2024-04-18 15:40:37 +02:00
Kraiem Taha Yassine
0c5f8b4d20
fix(chalice): deduplicate viewed-sessions (#2105) 2024-04-18 15:31:59 +02:00
Mehdi Osman
f1ba8f03f4
Increment chalice chart version (#2097)
Co-authored-by: GitHub Action <action@github.com>
2024-04-16 15:18:11 +02:00
Kraiem Taha Yassine
256971304d
fix(chalice): fixed update JIRA integration (#2096)
fix(chalice): fixed JIRA URL validation
fix(chalice): fixed add/update JIRA token
2024-04-16 15:13:24 +02:00
Mehdi Osman
38a04bc6c9
Increment chalice chart version (#2093)
Co-authored-by: GitHub Action <action@github.com>
2024-04-16 13:51:56 +02:00
Mehdi Osman
ff190e7315
Increment chalice chart version (#2089)
Co-authored-by: GitHub Action <action@github.com>
2024-04-15 19:03:45 +02:00
Kraiem Taha Yassine
e721ee6fc5
fix(chalice): fixed clickmap events filter (#2088) 2024-04-15 19:01:15 +02:00
rjshrjndrn
6a142db59f chore(ci): Always rebase on main
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-10 12:52:49 +02:00
Mehdi Osman
0f26fcf3e4
Increment chalice chart version (#2070)
Co-authored-by: GitHub Action <action@github.com>
2024-04-09 18:08:50 +02:00
Mehdi Osman
f0488edf83
Updated patch build from main 41318269f7 (#2069)
* chore(buil): Cherrypicking build script
* fix(chalice): fixed mouse_thrashing title (#2014)
* fix(chalice): fixed vault with exp_search
* refactor(chalice): enhanced CH exception handler
* fix(chalice): fixed table of URLs-values not filtered according to the specified sessions' filters CH (#2055)
* fix(chalice): fixed cards-table error (#2057)
* fix(chalice): fixed 1 stage results VS multi-stages result (#2060)
* fix(chalice): fixed funnels negative-filter's operators (#2061)
* refactor(chalice): changed JWT_REFRESH_EXPIRATION default value (#2062)
* refactor(chalice): delete global session notes (#2064)
* fix(chalice): support issues step-filters and tab-filters at the same… (#2065)
* fix(chalice): support issues step-filters and tab-filters at the same time
* Increment chalice chart version

---------

Co-authored-by: Taha Yassine Kraiem <tahayk2@gmail.com>
Co-authored-by: GitHub Action <action@github.com>
2024-04-09 16:56:56 +02:00
rjshrjndrn
a5df7ffb90 fix(ci): Build ee
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-08 17:53:41 +02:00
rjshrjndrn
e2f120b77f chore(ci): Adding debug logs
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-08 17:37:34 +02:00
rjshrjndrn
b39a2dbaa9 chore(buil): Cherrypicking build script
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-08 17:16:05 +02:00
Mehdi Osman
65c497f902
Updated patch build from main ae1a49fd91 (#2059)
* change(ui): fullView param to restrict to player content

* Increment frontend chart version

---------

Co-authored-by: Shekar Siri <sshekarsiri@gmail.com>
Co-authored-by: GitHub Action <action@github.com>
2024-04-08 17:01:40 +02:00
rjshrjndrn
ebf5c1f49c fix(ci): build image tags
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-06 15:05:13 +02:00
rjshrjndrn
31f2fe6d48 fix(patch): proper commit id
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-06 14:59:07 +02:00
rjshrjndrn
fe4a38d6fa ci(patch): Adding follow up job trigger in description
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-06 13:10:44 +02:00
Rajesh Rajendran
b16fd7ffff actions: build patch for main branch (#2047)
* ci(actions): For GH patching

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(actions): Pushing the changed code to a new branch

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(ci): Skipping bulds for chalice and frontend arm builds

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(actions): Build msaas

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(actions): Removed unnecessary steps

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(actions): Proper name

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ci): Sevice names

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(actions): Fixes

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* actions: limit actor

* chore(release): Updated version to v1.18.0

* Enable AWS ecr auth

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ci): fixes

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(patch): Update tag with main

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(ci): Remove debug job

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-06 13:05:41 +02:00
rjshrjndrn
11406d4dbf chore(helm): Disable minio lifecycle by default
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-04-03 19:57:31 +02:00
rjshrjndrn
f149ace8f2 chore(helm): Updating frontend image release 2024-04-03 12:56:22 +02:00
Delirium
3002386673
fix ui: reader skipping bytes bugfix (#2030) 2024-04-03 12:31:40 +02:00
rjshrjndrn
0092b2fcb7 chore(helm): Updating assets image release 2024-04-02 17:56:20 +02:00
Delirium
ed281b4f7d
fix: remove orphan submodule hash (#2008) 2024-03-28 12:15:26 +01:00
Delirium
0ab36aac03
Removed submodule ee/intelligent_search/llama (#2007) 2024-03-28 11:38:43 +01:00
rjshrjndrn
52fe465dc8 chore(cli): Installing templater
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-28 09:35:30 +01:00
rjshrjndrn
4b3bbe1e8d feat(helm): Ability to change the retention on hours
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-26 18:42:24 +01:00
rjshrjndrn
548930c5be chore(helm): reducing default retention to 1day
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-26 14:53:12 +01:00
rjshrjndrn
bffd6e51b4 feat(helm): Override the tmp directory path 2024-03-26 14:49:50 +01:00
rjshrjndrn
ad37e94cc7 chore(helm): Updating storage image release 2024-03-22 16:05:39 +01:00
Alexander
9dbf682efe
feat(backend): added missing USE_S3_TAGS env var (#1984) 2024-03-22 14:33:53 +01:00
Alexander
ec867328ba
feat(backend): moved file tagging feature to EE (#1981) 2024-03-22 14:15:37 +01:00
rjshrjndrn
85fe92e352 chore(helm): run cleanup every day with 2 days retention
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-19 16:47:57 +01:00
rjshrjndrn
a30cfb8468 feat(init): Provision to override build scripts
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-18 12:52:22 +01:00
rjshrjndrn
ddc3d1202f fix(build): source script
Shouldn't have "" else the empty string will cause issue.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-14 18:05:18 +01:00
rjshrjndrn
dbbe4cd2e1 chore(helm): Updating chalice image release 2024-03-13 19:12:05 +01:00
Kraiem Taha Yassine
3249329537
fix(chalice): reduce AIO-PG pool size (#1953)
refactor(chalice): configurable AIO-PG pool size
2024-03-13 17:26:44 +01:00
rjshrjndrn
82c33dab0c chore(build): Chalice support arm build
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-12 17:12:22 +01:00
rjshrjndrn
fa4a8f0c67 chore(helm): Updating chalice image release 2024-03-12 16:39:32 +01:00
rjshrjndrn
933626d3ae fix(docker): Cache source build
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-12 16:35:58 +01:00
rjshrjndrn
ab6921c6d9 chore(build): Custom docker build env 2024-03-12 16:13:59 +01:00
Kraiem Taha Yassine
ee87e89805
fix(chalice): fixed cards data merge (#1948) 2024-03-12 16:10:05 +01:00
rjshrjndrn
398e50a9b0 build: Library function for custom docker build 2024-03-12 16:07:54 +01:00
rjshrjndrn
e0a2c9b97d chore(helm): Update docker image
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-12 15:26:06 +01:00
Alexander
4099eea0f6
feat(backend): added path+query to autocomplete (#1946) 2024-03-12 15:08:26 +01:00
rjshrjndrn
e424ccd26b chore(build): Updating build script to take custom docker runtimes
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-12 15:02:52 +01:00
rjshrjndrn
f74b25f81a chore(helm): Updating db image release 2024-03-12 13:28:59 +00:00
Alexander
6bf07df5e6
feat(backend): added full url (#1944) 2024-03-12 14:01:42 +01:00
rjshrjndrn
5504964fe4 chore(helm): Updating chalice image release 2024-03-12 11:37:59 +00:00
Kraiem Taha Yassine
ee6b22b579
fix(chalice): transform array-source to single value for sessions-filters (#1943) 2024-03-12 12:28:57 +01:00
R Sriram
7b0027e3bd
removed duplicate dependency entry 🙂🙂 (#1933)
Co-authored-by: Sriram Ramesh <sriram@vananam.com>
2024-03-07 14:15:39 +01:00
rjshrjndrn
e2bfc23064 chore(helm): Updating chalice image release 2024-03-06 18:01:53 +00:00
Kraiem Taha Yassine
eea362969e
fix(chalice): check relayState type for SSO (#1932) 2024-03-06 18:59:49 +01:00
rjshrjndrn
0d88edb572 chore(helm): Updating chalice image release 2024-03-06 13:08:26 +00:00
Kraiem Taha Yassine
b6976dfec6
refactor(chalice): optimized search sessions by specific issue (#1931)
fix(chalice): fixed search sessions by specific issue in EXP mode
2024-03-06 13:32:37 +01:00
rjshrjndrn
e0ffc4175d feat(cli): Get git version
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-03-05 11:35:50 +01:00
rjshrjndrn
3663e21c67 chore(helm): Updating chalice image release 2024-03-01 19:08:34 +00:00
rjshrjndrn
a68e19b239 chore(helm): Updating chalice image release 2024-03-01 19:03:43 +00:00
rjshrjndrn
55576d1251 chore(helm): Updating chalice image release 2024-03-01 18:59:49 +00:00
rjshrjndrn
8784615509 chore(helm): Updating chalice image release 2024-03-01 18:58:52 +00:00
Kraiem Taha Yassine
8c6ce9c068
fix(chalice): fixed nested data for try/issues-funnel (#1926) 2024-03-01 19:35:50 +01:00
rjshrjndrn
f9aaa45b0c chore(helm): Updating chalice image release 2024-02-29 17:42:07 +00:00
Kraiem Taha Yassine
fa91609d8a
fix(chalice): fixed SSO (#1920) 2024-02-29 17:19:09 +01:00
rjshrjndrn
be717cd01a feat(cli): override busybox from env
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-29 10:12:17 +01:00
rjshrjndrn
cf6e7511a2 chore(helm): Updating frontend image release 2024-02-28 13:32:23 +00:00
Delirium
c1c1617766
fix(ui): fix search query (#1916) 2024-02-28 09:55:34 +01:00
Alexander
32525385af
feat(backend): upgraded user-agent for assist service (#1914) 2024-02-27 14:13:31 +01:00
rjshrjndrn
c19f258860 fix(helm): minio init
For new minio, the command changed for setting access.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-27 10:36:06 +01:00
rjshrjndrn
2c31a239bd chore(helm): upgrade postgres version
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-22 18:44:16 +01:00
rjshrjndrn
7f6d0d07c8 chore(helm): Updating frontend image release 2024-02-22 08:31:21 +00:00
Delirium
2152d1c3db
fix(ui): skip 0 index for orphan sheets, skip -moz- checks for css rules (#1900) 2024-02-21 16:00:25 +01:00
rjshrjndrn
87c3b59a59 chore(helm): Updating frontend image release 2024-02-21 10:42:42 +00:00
Shekar Siri
b51b7dcfad
Iframe changes to SSO (#1899)
* change(ui): iframe check for sso redirect

* change(ui): removed login.js

* change(ui): sso link

* change(ui): sso link

* change(ui): iframe check and sso redirect
2024-02-20 14:27:18 +01:00
Delirium
452dde1620
fix(ui): cap video framerate for mobile to 100hz (#1898) 2024-02-20 14:26:49 +01:00
rjshrjndrn
707939a37f chore(helm): Updating frontend image release 2024-02-20 09:42:54 +00:00
rjshrjndrn
eb47338c1e fix(minio): wrong data path for minio
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-19 17:00:23 +01:00
Delirium
2192681149
fix(ui): canvas replay back/forth bug (#1896)
* fix(tracker): change canvas scaling

* fix(tracker): 12.0.3

* fix(tracker): 12.0.3
2024-02-19 16:53:15 +01:00
rjshrjndrn
c8d0d1e949 chore(docker): support arch build
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-15 17:29:22 +01:00
rjshrjndrn
653221cbd8 feat(kube): updating postgres image
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-15 16:27:42 +01:00
rjshrjndrn
499fff6646 feat(cli): arm64 support
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-15 13:18:47 +01:00
rjshrjndrn
0b4c0e092d chore(helm): Update minio tag
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-15 13:07:43 +01:00
Alexander
dbfbf55b82
fix(assist): fixed call/control/stats functionality (#1890) 2024-02-14 16:37:23 +01:00
rjshrjndrn
e327522829 chore(helm): Updating frontend image release 2024-02-13 08:38:35 +00:00
rjshrjndrn
24f489dcc6 chore(helm): Updating chalice image release 2024-02-13 08:23:10 +00:00
rjshrjndrn
4503aeca25 fix: kerberos build
Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
2024-02-13 07:17:17 +01:00
Kraiem Taha Yassine
e97b519598
fix(chalice): install lxml from source (#1888) 2024-02-12 18:30:32 +01:00
Shekar Siri
7926279342
fix(ui): autoplay session ids are parsed to int (#1883) 2024-02-09 19:03:46 +01:00
Kraiem Taha Yassine
61c415bffa
feature(chalice): support multi SSO redirect (#1882) 2024-02-09 18:45:54 +01:00
Alexander
0de0dd4cbf
feat(assist): improved assist performance (for one-node mode and cluster mode) (#1880) 2024-02-06 16:30:43 +01:00
rjshrjndrn
ad8e35198b fix: override
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-05 18:34:30 +01:00
rjshrjndrn
76ddea74f3 fix(install): override file
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-05 18:20:29 +01:00
rjshrjndrn
1765c0b5bf chore(cli): support override file while installing
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-02-02 20:57:26 +01:00
Mohammad H Alijany
621f63d90e
fix(scrips) Improve Docker Installation Scripts (#1866)
* fix(scrips) fix docker installation commands

* fix(scripts) check docker compose installation

* fix(script) revert scripts styles
2024-01-29 09:38:18 +01:00
Mehdi Osman
89f59b2054
Fixed translations (#1865)
* Updated hero

* Fixed typo and added feature flags

* Typo
2024-01-26 18:37:35 -05:00
Mehdi Osman
1a5d00444e
Updated hero (#1864) 2024-01-26 18:17:00 -05:00
rjshrjndrn
4ee57c4e87 chore(docker-compose): update version
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-01-26 23:05:28 +01:00
rjshrjndrn
e8c8b861e0 fix(docker): redis path
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-01-26 23:04:08 +01:00
Rajesh Rajendran
7f05a81b0b
update caddy, postgres, minio in docker compose (#1863)
Co-authored-by: keshav-multi <keshav@multivariate.tech>
2024-01-26 22:49:31 +01:00
rjshrjndrn
b58b446ca6 chore(frontend): Update the image version in env
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2024-01-26 21:43:03 +01:00
Taha Yassine Kraiem
65f843805c refactor(chalice): changed permissions 2024-01-26 19:05:38 +01:00
Amirouche
90059f59ca fix(api): even in ee, get_boy_project_key must return the project 2024-01-26 18:11:35 +01:00
198 changed files with 3067 additions and 2886 deletions

View file

@ -145,11 +145,14 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -133,12 +133,15 @@ jobs:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging

View file

@ -144,11 +144,15 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -133,11 +133,12 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -116,11 +116,12 @@ jobs:
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -130,11 +130,13 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -115,11 +115,12 @@ jobs:
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -145,11 +145,12 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -140,12 +140,13 @@ jobs:
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -78,4 +78,4 @@ jobs:
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}

View file

@ -133,11 +133,12 @@ jobs:
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

.github/workflows/patch-build.yaml (new workflow file, 158 lines added)
View file

@ -0,0 +1,158 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
required: true
default: 'chalice,frontend'
name: Build patches from main branch, Raise PR to Main, and Push to tag
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 1
- name: Rebase with main branch, to make sure the code has latest main changes
run: |
git pull --rebase origin main
- name: Downloading yq
run: |
VERSION="v4.42.1"
sudo wget https://github.com/mikefarah/yq/releases/download/${VERSION}/yq_linux_amd64 -O /usr/bin/yq
sudo chmod +x /usr/bin/yq
# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=patch/main/${HEAD_COMMIT_ID}" >> $GITHUB_ENV
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Build
id: build-image
env:
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' ]] && cd $working_dir/api || cd $SERVICE
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
git push --set-upstream origin $BRANCH_NAME
done
- name: Create Pull Request
uses: repo-sync/pull-request@v2
with:
github_token: ${{ secrets.ACTIONS_COMMMIT_TOKEN }}
source_branch: ${{ env.BRANCH_NAME }}
destination_branch: "main"
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
pr_body: |
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
Once this PR is merged, To update the latest tag, run the following workflow.
https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
# MSAAS_REPO_FOLDER: /tmp/msaas
# with:
# limit-access-to-actor: true
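
For reference, a hedged sketch of how this dispatch-only workflow could be kicked off and what the image_version() bump does. The gh invocation is illustrative (it assumes the GitHub CLI is installed and authorized for the repository); the awk expression is the one used in the workflow above.

# Trigger the patch build for selected services (illustrative; assumes gh CLI)
gh workflow run patch-build.yaml -f services=chalice,frontend

# Sanity check of the patch-version bump performed by image_version()
echo "v1.18.0" | awk -F. '{$NF += 1 ; print $1"."$2"."$3}'   # prints v1.18.1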

View file

@ -133,11 +133,12 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -130,11 +130,13 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -83,4 +83,4 @@ jobs:
]
}
EOF
aws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json
aws route53 change-resource-record-sets --hosted-zone-id ${{ secrets.OR_PR_HOSTED_ZONE_ID }} --change-batch file://route53-changes.json

View file

@ -329,10 +329,12 @@ jobs:
# run: |
# # Add any cleanup commands if necessary
- name: Debug Job
if: failure()
uses: mxschmitt/action-tmate@v3
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -132,11 +132,13 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -131,11 +131,13 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -72,4 +72,4 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: tracker
name: tracker
name: tracker

.github/workflows/update-tag.yaml (new workflow file, 35 lines added)
View file

@ -0,0 +1,35 @@
on:
workflow_dispatch:
description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
inputs:
services:
description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
required: true
default: "false"
name: Force Push tag with main branch HEAD
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Push main branch to tag
run: |
git fetch --tags
git checkout main
git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true
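
The same tag move can be reproduced locally; a minimal sketch, shown only to clarify what the step does (it is destructive, force-moving the newest v* tag to the current HEAD):

git fetch --tags
latest_tag=$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1)   # highest release tag, e.g. v1.18.0
git push origin HEAD:refs/tags/${latest_tag} --force                   # repoint it at the current commit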

View file

@ -169,11 +169,12 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@ -166,11 +166,12 @@ jobs:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

.gitmodules (3 lines removed)
View file

@ -1,3 +0,0 @@
[submodule "ee/intelligent_search/llama"]
path = ee/intelligent_search/llama
url = https://github.com/facebookresearch/llama.git

View file

@ -38,15 +38,11 @@
</a>
</p>
<p align="center">
<a href="https://github.com/openreplay/openreplay">
<img src="static/openreplay-git-hero.svg">
</a>
</p>
https://github.com/openreplay/openreplay/assets/20417222/684133c4-575a-48a7-aa91-d4bf88c5436a
OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web and mobile apps, helping you troubleshoot issues faster.
- **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more.
- **Session replay**. OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more. In addition to web applications, iOS and React Native apps are also supported (Android and Flutter are coming out soon).
- **Low footprint**. With a ~26KB (.br) tracker that asynchronously sends minimal data for a very limited impact on performance.
- **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data.
- **Privacy controls**. Fine-grained security features for sanitizing user data.

View file

@ -38,16 +38,12 @@
</a>
</p>
<p align="center">
<a href="https://github.com/openreplay/openreplay">
<img src="static/openreplay-git-hero.svg">
</a>
</p>
https://github.com/openreplay/openreplay/assets/20417222/684133c4-575a-48a7-aa91-d4bf88c5436a
OpenReplay هو مجموعة إعادة تشغيل الجلسة التي يمكنك استضافتها بنفسك، والتي تتيح لك رؤية ما يقوم به المستخدمون على تطبيق الويب الخاص بك، مما يساعدك على حل المشكلات بشكل أسرع.
OpenReplay هو مجموعة إعادة تشغيل الجلسة التي يمكنك استضافتها بنفسك، والتي تتيح لك رؤية ما يقوم به المستخدمون على تطبيق الويب و تطبيقات الهاتف المحمول الخاص بك، مما يساعدك على حل المشكلات بشكل أسرع.
- **إعادة تشغيل الجلسة.** يقوم OpenReplay بإعادة تشغيل ما يقوم به المستخدمون، وكيف يتصرف موقع الويب الخاص بك أو التطبيق من خلال التقاط النشاط على الشبكة، وسجلات وحدة التحكم، وأخطاء JavaScript، وإجراءات/حالة التخزين، وقياسات سرعة الصفحة، واستخدام وحدة المعالجة المركزية/الذاكرة، وأكثر من ذلك بكثير.
- **بصمة منخفضة**. مع متتبع بحجم حوالي 26 كيلوبايت (نوع .br) الذي يرسل بيانات دقيقة بشكل غير متزامن لتأثير محدود جدًا على الأداء.
- **إعادة تشغيل الجلسة**. يقوم OpenReplay بإعادة تشغيل ما يقوم به المستخدمون، وكيف يتصرف موقع الويب الخاص بك أو التطبيق من خلال التقاط النشاط على الشبكة، وسجلات وحدة التحكم، وأخطاء JavaScript، وإجراءات/حالة التخزين، وقياسات سرعة الصفحة، واستخدام وحدة المعالجة المركزية/الذاكرة، وأكثر من ذلك بكثير. بالإضافة إلى تطبيقات الويب، تطبيقات نظام iOS و React Native مدعومة أيضاً (سيتم إطلاق نسخ Android و Flutter قريباً).
- **بصمة منخفضة**. مع متتبع بحجم حوالي 26 كيلوبايت (نوع .br) الذي يرسل بيانات دقيقة بشكل غير متزامن لتأثير محدود جدًا على الأداء.
- **مضيف بواسطتك.** لا مزيد من فحوص الامتثال الأمني، ومعالجة بيانات المستخدمين من قبل جهات خارجية. كل ما يتم التقاطه بواسطة OpenReplay يبقى في سحابتك للتحكم الكامل في بياناتك.
- **ضوابط الخصوصية.** ميزات أمان دقيقة لتنقية بيانات المستخدم.
- **نشر سهل.** بدعم من مزودي الخدمة السحابية العامة الرئيسيين (AWS، GCP، Azure، DigitalOcean).

View file

@ -38,15 +38,11 @@
</a>
</p>
<p align="center">
<a href="https://github.com/openreplay/openreplay">
<img src="static/openreplay-git-hero.svg">
</a>
</p>
https://github.com/openreplay/openreplay/assets/20417222/684133c4-575a-48a7-aa91-d4bf88c5436a
OpenReplay es una suite de retransmisión de sesiones que puedes alojar tú mismo, lo que te permite ver lo que hacen los usuarios en tu aplicación web y ayudarte a solucionar problemas más rápido.
OpenReplay es una suite de retransmisión de sesiones que puedes alojar tú mismo, lo que te permite ver lo que hacen los usuarios en tu aplicación web y móviles y ayudarte a solucionar problemas más rápido.
- **Reproducción de sesiones.** OpenReplay reproduce lo que hacen los usuarios, pero no solo eso. También te muestra lo que ocurre bajo el capó, cómo se comporta tu sitio web o aplicación al capturar la actividad de la red, registros de la consola, errores de JavaScript, acciones/estado del almacén, métricas de velocidad de la página, uso de CPU/memoria y mucho más.
- **Reproducción de sesiones**. OpenReplay reproduce lo que hacen los usuarios, pero no solo eso. También te muestra lo que ocurre bajo el capó, cómo se comporta tu sitio web o aplicación al capturar la actividad de la red, registros de la consola, errores de JavaScript, acciones/estado del almacén, métricas de velocidad de la página, uso de CPU/memoria y mucho más. Además de las aplicaciones web, las aplicaciones de iOS y React Native también son compatibles (las versiones de Android y Flutter saldrán pronto).
- **Huella reducida.** Con un rastreador de aproximadamente 26 KB (.br) que envía datos mínimos de forma asíncrona, lo que tiene un impacto muy limitado en el rendimiento.
- **Auto-alojado.** No más verificaciones de cumplimiento de seguridad, procesamiento de datos de usuario por terceros. Todo lo que OpenReplay captura se queda en tu nube para un control completo sobre tus datos.
- **Controles de privacidad.** Funciones de seguridad detalladas para desinfectar los datos de usuario.
@ -57,6 +53,7 @@ OpenReplay es una suite de retransmisión de sesiones que puedes alojar tú mism
- **Reproducción de sesiones:** Te permite revivir la experiencia de tus usuarios, ver dónde encuentran dificultades y cómo afecta su comportamiento. Cada reproducción de sesión se analiza automáticamente en función de heurísticas, para un triaje sencillo.
- **Herramientas de desarrollo (DevTools):** Es como depurar en tu propio navegador. OpenReplay te proporciona el contexto completo (actividad de red, errores de JavaScript, acciones/estado del almacén y más de 40 métricas) para que puedas reproducir instantáneamente errores y entender problemas de rendimiento.
- **Asistencia (Assist):** Te ayuda a brindar soporte a tus usuarios al ver su pantalla en tiempo real y unirte instantáneamente a una llamada (WebRTC) con ellos, sin necesidad de software de uso compartido de pantalla de terceros.
- **Banderas de características:** Habilitar o deshabilitar una característica, hacer lanzamientos graduales y pruebas A/B sin necesidad de volver a desplegar tu aplicación.
- **Búsqueda universal (Omni-search):** Busca y filtra por casi cualquier acción/criterio de usuario, atributo de sesión o evento técnico, para que puedas responder a cualquier pregunta. No se requiere instrumentación.
- **Embudos (Funnels):** Para resaltar los problemas más impactantes que causan la conversión y la pérdida de ingresos.
- **Controles de privacidad detallados:** Elige qué capturar, qué ocultar o qué ignorar para que los datos de usuario ni siquiera lleguen a tus servidores.

View file

@ -38,15 +38,11 @@
</a>
</p>
<p align="center">
<a href="https://github.com/openreplay/openreplay">
<img src="static/openreplay-git-hero.svg">
</a>
</p>
https://github.com/openreplay/openreplay/assets/20417222/684133c4-575a-48a7-aa91-d4bf88c5436a
OpenReplay est une suite d'outils de relecture (appelée aussi "replay") de sessions que vous pouvez héberger vous-même, vous permettant de voir ce que les utilisateurs font sur une application web, vous aidant ainsi à résoudre différents types de problèmes plus rapidement.
OpenReplay est une suite d'outils de relecture (appelée aussi "replay") de sessions que vous pouvez héberger vous-même, vous permettant de voir ce que les utilisateurs font sur une application web ou mobile, vous aidant ainsi à résoudre différents types de problèmes plus rapidement.
- **Relecture de session.** OpenReplay rejoue ce que les utilisateurs font, mais pas seulement. Il vous montre également ce qui se passe en coulisse, comment votre site web ou votre application se comporte en capturant l'activité réseau, les journaux de console, les erreurs JS, les actions/états du store, les métriques de chargement des pages, l'utilisation du CPU/mémoire, et bien plus encore.
- **Relecture de session**. OpenReplay rejoue ce que les utilisateurs font, mais pas seulement. Il vous montre également ce qui se passe en coulisse, comment votre site web ou votre application se comporte en capturant l'activité réseau, les journaux de console, les erreurs JS, les actions/états du store, les métriques de chargement des pages, l'utilisation du CPU/mémoire, et bien plus encore. En plus des applications web, les applications iOS et React Native sont également prises en charge (les versions Android et Flutter seront bientôt disponibles).
- **Faible empreinte**. Avec un traqueur d'environ 26 Ko (.br) qui envoie de manière asynchrone des données minimales, ce qui a un impact très limité sur les performances.
- **Auto-hébergé**. Plus de vérifications de conformité en matière de sécurité, plus de traitement des données des utilisateurs par des tiers. Tout ce qu'OpenReplay capture reste dans votre cloud pour un contrôle complet sur vos données.
- **Contrôles de confidentialité**. Fonctionnalités de sécurité détaillées pour la désinfection des données utilisateur.
@ -57,6 +53,7 @@ OpenReplay est une suite d'outils de relecture (appelée aussi "replay") de sess
- **Relecture de session :** Vous permet de revivre l'expérience de vos utilisateurs, de voir où ils rencontrent des problèmes et comment cela affecte leur comportement. Chaque relecture de session est automatiquement analysée en se basant sur des heuristiques, pour un triage plus facile des problèmes en fonction de l'impact.
- **Outils de développement (DevTools) :** C'est comme déboguer dans votre propre navigateur. OpenReplay vous fournit le contexte complet (activité réseau, erreurs JS, actions/états du store et plus de 40 métriques) pour que vous puissiez instantanément reproduire les bugs et comprendre les problèmes de performance.
- **Assistance (Assist) :** Vous aide à soutenir vos utilisateurs en voyant leur écran en direct et en vous connectant instantanément avec eux via appel/vidéo (WebRTC), sans nécessiter de logiciel tiers de partage d'écran.
- **Drapeaux de fonctionnalité :** Activer ou désactiver une fonctionnalité, faire des déploiements progressifs et des tests A/B sans avoir à redéployer votre application.
- **Recherche universelle (Omni-search) :** Recherchez et filtrez presque n'importe quelle action/critère utilisateur, attribut de session ou événement technique, afin de pouvoir répondre à n'importe quelle question. Aucune instrumentation requise.
- **Entonnoirs (Funnels) :** Pour mettre en évidence les problèmes les plus impactants entraînant une conversion et une perte de revenus.
- **Contrôles de confidentialité détaillés :** Choisissez ce que vous voulez capturer, ce que vous voulez obscurcir ou ignorer, de sorte que les données utilisateur n'atteignent même pas vos serveurs.

View file

@ -38,15 +38,11 @@
</a>
</p>
<p align="center">
<a href="https://github.com/openreplay/openreplay">
<img src="static/openreplay-git-hero.svg">
</a>
</p>
https://github.com/openreplay/openreplay/assets/20417222/684133c4-575a-48a7-aa91-d4bf88c5436a
OpenReplay - это набор инструментов для воспроизведения пользовательских сессий, позволяющий увидеть действия пользователи в вашем веб-приложении, который вы можете разместить в своем облаке или на серверах.
OpenReplay - это набор инструментов для воспроизведения сессий, который вы можете разместить самостоятельно, позволяющий вам видеть, что пользователи делают в ваших веб- и мобильных приложениях, помогая вам быстрее устранять проблемы.
- **Воспроизведение сессий.** OpenReplay не только воспроизводит действия пользователей, но и показывает, что происходит под капотом сессии, как ведет себя ваш сайт или приложение, фиксируя сетевую активность, логи консоли, JS-ошибки, действия/состояние стейт менеджеров, показатели скорости страницы, использование процессора/памяти и многое другое.
- **Воспроизведение сессий**. OpenReplay не только воспроизводит действия пользователей, но и показывает, что происходит под капотом сессии, как ведет себя ваш сайт или приложение, фиксируя сетевую активность, логи консоли, JS-ошибки, действия/состояние стейт менеджеров, показатели скорости страницы, использование процессора/памяти и многое другое. В дополнение к веб-приложениям, также поддерживаются приложения для iOS и React Native (приложения для Android и Flutter скоро появятся).
- **Компактность**. Размером всего в ~26 КБ (.br), трекер асинхронно отправляет минимальное количество данных, оказывая очень незначительное влияние на производительность вашего приложения.
- **Self-hosted**. Больше никаких проверок на соответствие требованиям безопасности или обработки данных ваших пользователей третьими сторонами. Все, что фиксирует OpenReplay, остается в вашем облаке, что обеспечивает полный контроль над вашими данными.
- **Контроль над приватностью**. Тонкие настройки приватности позволяют записывать только действительно необходимые данные.
@ -57,6 +53,7 @@ OpenReplay - это набор инструментов для воспроиз
- **Session Replay:** Позволяет повторить опыт пользователей, увидеть, где они испытывают трудности и как это влияет на конверсию. Каждый реплей автоматически анализируется на наличие ошибок и аномалий, что значительно облегчает сортировку и поиск проблемных сессий.
- **DevTools:** Прямо как отладка в вашем собственном браузере. OpenReplay предоставляет вам полный контекст (сетевая активность, JS ошибки, действия/состояние стейт менеджеров и более 40 метрик), чтобы вы могли мгновенно воспроизвести ошибки и найти проблемы с производительностью.
- **Assist:** Позволяет вам помочь вашим пользователям, наблюдая их экран в настоящем времени и мгновенно переходя на звонок (WebRTC) с ними, не требуя стороннего программного обеспечения для совместного просмотра экрана.
- **Функциональные флаги:** Включение или отключение функции, поэтапный выпуск и A/B тестирование без необходимости повторного развертывания вашего приложения.
- **Omni-search:** Поиск и фильтрация практически любого действия пользователя/критерия, атрибута сессии или технического события, чтобы вы могли ответить на любой вопрос.
- **Воронки:** Для выявления наиболее влияющих на конверсию мест.
- **Тонкая настройка приватности:** Выбирайте, что записывать, а что игнорировать, чтобы данные пользователя даже не отправлялись на ваши сервера.

View file

@ -56,7 +56,9 @@ async def lifespan(app: FastAPI):
"application_name": "AIO" + config("APP_NAME", default="PY"),
}
database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection)
database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection,
min_size=config("PG_AIO_MINCONN", cast=int, default=1),
max_size=config("PG_AIO_MAXCONN", cast=int, default=5), )
app.state.postgresql = database
# App listening
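
The new pool bounds are read through config() from PG_AIO_MINCONN and PG_AIO_MAXCONN, defaulting to 1 and 5. A minimal sketch of overriding them at deploy time; the start command is a placeholder, not the project's actual entrypoint:

# Enlarge the asyncio PostgreSQL pool for a busier instance
export PG_AIO_MINCONN=2
export PG_AIO_MAXCONN=10
# ...then start the API process as usual (placeholder)
# uvicorn app:app --host 0.0.0.0 --port 8000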

View file

@ -15,6 +15,9 @@ exit_err() {
fi
}
source ../scripts/lib/_docker.sh
ARCH=${ARCH:-'amd64'}
environment=$1
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
@ -66,7 +69,7 @@ function build_api() {
tag="ee-"
}
mv Dockerfile.dockerignore .dockerignore
docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} .
docker build -f ./Dockerfile --platform linux/${ARCH} --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/${IMAGE_NAME:-'chalice'}:${image_tag} .
cd ../api || exit_err 100
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
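
With the ARCH default of amd64 and the new --platform flag, the target architecture can be chosen per invocation. A hedged usage sketch mirroring the invocation pattern in patch-build.yaml above; the registry and tag values are placeholders:

# Build the EE chalice image for arm64 without pushing it
cd api
ARCH=arm64 DOCKER_REPO=example.registry.io IMAGE_TAG=v1.18.1-ee PUSH_IMAGE=0 bash build.sh ee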

View file

@ -10,6 +10,7 @@
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
source ../scripts/lib/_docker.sh
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -17,27 +18,26 @@ check_prereq() {
}
}
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_alerts(){
function build_alerts() {
destination="_alerts"
[[ $1 == "ee" ]] && {
destination="_alerts_ee"
@ -69,5 +69,5 @@ function build_alerts(){
check_prereq
build_alerts $1
if [[ $PATCH -eq 1 ]]; then
update_helm_release alerts
update_helm_release alerts
fi

View file

@ -9,6 +9,7 @@
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
envarg="default-foss"
source ../scripts/lib/_docker.sh
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -17,7 +18,7 @@ check_prereq() {
[[ exit -eq 1 ]] && exit 1
}
function build_crons(){
function build_crons() {
destination="_crons_ee"
cp -R ../api ../${destination}
cd ../${destination}
@ -46,7 +47,6 @@ check_prereq
[[ $1 == "ee" ]] && {
build_crons $1
} || {
echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
exit 100
echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
exit 100
}

View file

@ -88,6 +88,7 @@ def __get_sessions_list(project_id, user_id, data: schemas.CardSchema):
def __get_click_map_chart(project_id, user_id, data: schemas.CardClickMap, include_mobs: bool = True):
if len(data.series) == 0:
return None
data.series[0].filter.filters += data.series[0].filter.events
return click_maps.search_short_session(project_id=project_id, user_id=user_id,
data=schemas.ClickMapSessionsSearch(
**data.series[0].filter.model_dump()),
@ -193,10 +194,10 @@ def __merge_metric_with_data(metric: schemas.CardSchema,
if data.series is not None and len(data.series) > 0:
metric.series = data.series
if len(data.filters) > 0:
for s in metric.series:
s.filter.filters += data.filters
metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
# if len(data.filters) > 0:
# for s in metric.series:
# s.filter.filters += data.filters
# metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
return metric
@ -257,11 +258,11 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
def __get_funnel_issues(project_id: int, user_id: int, data: schemas.CardFunnel):
if len(data.series) == 0:
return {"data": []}
return []
data.series[0].filter.startTimestamp = data.startTimestamp
data.series[0].filter.endTimestamp = data.endTimestamp
data = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
return {"data": data}
return data
def __get_path_analysis_issues(project_id: int, user_id: int, data: schemas.CardPathAnalysis):

View file

@ -450,7 +450,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
# pg_sub_query_chart.append("source ='js_exception'")
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
@ -537,7 +538,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
FROM events.errors
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp

View file

@ -19,11 +19,13 @@ class JIRAIntegration(integration_base.BaseIntegration):
self._user_id = user_id
self.integration = self.get()
if self.integration is None:
return
self.integration["valid"] = True
if not self.integration["url"].endswith('atlassian.net'):
self.integration["valid"] = False
@staticmethod
def __validate(data):
data["valid"] = JIRAIntegration.__is_valid_url(data["url"])
@staticmethod
def __is_valid_url(url):
return url.endswith('atlassian.net') or url.endswith('atlassian.net/')
@property
def provider(self):
@ -31,7 +33,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
@property
def issue_handler(self):
if self.integration["url"].endswith('atlassian.net') and self._issue_handler is None:
if JIRAIntegration.__is_valid_url(self.integration["url"]) and self._issue_handler is None:
try:
self._issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
username=self.integration["username"],
@ -55,9 +57,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
if data is None:
return
data["valid"] = True
if not data["url"].endswith('atlassian.net'):
data["valid"] = False
JIRAIntegration.__validate(data)
return data
def get_obfuscated(self):
@ -81,16 +81,17 @@ class JIRAIntegration(integration_base.BaseIntegration):
**changes})
)
w = helper.dict_to_camel_case(cur.fetchone())
JIRAIntegration.__validate(w)
if obfuscate:
w["token"] = obfuscate_string(w["token"])
return self.get()
return w
# TODO: make this generic for all issue tracking integrations
def _add(self, data):
print("a pretty defined abstract method")
return
def add(self, username, token, url):
def add(self, username, token, url, obfuscate=False):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
@ -101,7 +102,11 @@ class JIRAIntegration(integration_base.BaseIntegration):
"token": token, "url": url})
)
w = helper.dict_to_camel_case(cur.fetchone())
return self.get()
JIRAIntegration.__validate(w)
if obfuscate:
w["token"] = obfuscate_string(w["token"])
return w
def delete(self):
with pg_client.PostgresClient() as cur:
@ -120,7 +125,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
"username": data.username,
"token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
else self.integration.token,
"url": data.url
"url": str(data.url)
},
obfuscate=True
)
@ -128,5 +133,6 @@ class JIRAIntegration(integration_base.BaseIntegration):
return self.add(
username=data.username,
token=data.token,
url=str(data.url)
url=str(data.url),
obfuscate=True
)
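
The refactor above centralizes the Atlassian-URL check in a private helper and reuses it on add/update instead of repeating the `endswith` test. A small illustrative sketch of the same validation idea, using hypothetical names (`is_valid_jira_url`, `validate_integration`) rather than the class's actual private methods:

```python
# Illustrative sketch of the URL check introduced in the hunk (names are hypothetical).
def is_valid_jira_url(url: str) -> bool:
    # Accept both "...atlassian.net" and "...atlassian.net/" endings, as in the diff.
    return url.endswith("atlassian.net") or url.endswith("atlassian.net/")

def validate_integration(data: dict) -> dict:
    # Mirrors the __validate() helper: annotate the record instead of raising.
    data["valid"] = is_valid_jira_url(data["url"])
    return data

assert validate_integration({"url": "https://acme.atlassian.net"})["valid"]
assert validate_integration({"url": "https://acme.atlassian.net/"})["valid"]
assert not validate_integration({"url": "https://jira.example.com"})["valid"]
```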

View file

@ -282,14 +282,31 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
density=density, factor=1, decimal=True))
extra_event = None
extra_conditions = None
if metric_of == schemas.MetricOfTable.visited_url:
extra_event = "events.pages"
extra_conditions = {}
for e in data.events:
if e.type == schemas.EventType.location:
if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
"type": e.type,
"isEvent": True,
"value": [],
"operator": e.operator,
"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:
extra_conditions[e.operator].value.append(v)
extra_conditions = list(extra_conditions.values())
elif metric_of == schemas.MetricOfTable.issues and len(metric_value) > 0:
data.filters.append(schemas.SessionSearchFilterSchema(value=metric_value, type=schemas.FilterType.issue,
operator=schemas.SearchEventOperator._is))
full_args, query_part = search_query_parts(data=data, error_status=None, errors_only=False,
favorite_only=False, issue=None, project_id=project_id,
user_id=None, extra_event=extra_event)
user_id=None, extra_event=extra_event, extra_conditions=extra_conditions)
full_args["step_size"] = step_size
with pg_client.PostgresClient() as cur:
if isinstance(metric_of, schemas.MetricOfTable):
@ -400,7 +417,7 @@ def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2):
# this function generates the query and return the generated-query with the dict of query arguments
def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status, errors_only, favorite_only, issue,
project_id, user_id, platform="web", extra_event=None):
project_id, user_id, platform="web", extra_event=None, extra_conditions=None):
ss_constraints = []
full_args = {"project_id": project_id, "startDate": data.startTimestamp, "endDate": data.endTimestamp,
"projectId": project_id, "userId": user_id}
@ -1085,6 +1102,24 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
extra_join += f"""INNER JOIN {extra_event} AS ev USING(session_id)"""
extra_constraints.append("ev.timestamp>=%(startDate)s")
extra_constraints.append("ev.timestamp<=%(endDate)s")
if extra_conditions and len(extra_conditions) > 0:
_extra_or_condition = []
for i, c in enumerate(extra_conditions):
if sh.isAny_opreator(c.operator):
continue
e_k = f"ec_value{i}"
op = sh.get_sql_operator(c.operator)
c.value = helper.values_for_operator(value=c.value, op=c.operator)
full_args = {**full_args,
**sh.multi_values(c.value, value_key=e_k)}
if c.type == events.EventType.LOCATION.ui_type:
_extra_or_condition.append(
sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
c.value, value_key=e_k))
else:
logging.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0:
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
query_part = f"""\
FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"}
{extra_join}
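
For the `visited_url` table metric, the hunk groups all location events by operator, de-duplicates their values, and later turns each group into a single OR'ed constraint on the location column. A standalone sketch of that grouping step, using plain dicts instead of the project's pydantic schemas:

```python
# Sketch: collapse location events into one synthetic condition per operator,
# de-duplicating values — the same shape the hunk builds with SessionSearchEventSchema2.
def group_location_events(events: list) -> list:
    grouped = {}
    for e in events:
        if e["type"] != "location":
            continue
        cond = grouped.setdefault(e["operator"], {
            "type": "location", "isEvent": True,
            "operator": e["operator"], "value": [], "filters": [],
        })
        for v in e["value"]:
            if v not in cond["value"]:
                cond["value"].append(v)
    return list(grouped.values())

events = [
    {"type": "location", "operator": "contains", "value": ["/checkout", "/cart"]},
    {"type": "location", "operator": "contains", "value": ["/cart"]},   # duplicate value
    {"type": "location", "operator": "is", "value": ["/home"]},
    {"type": "click", "operator": "on", "value": ["#buy"]},             # ignored: not a location event
]
print(group_location_events(events))
# -> one "contains" condition with ["/checkout", "/cart"] and one "is" condition with ["/home"]
```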

View file

@ -125,16 +125,15 @@ def edit(tenant_id, user_id, project_id, note_id, data: schemas.SessionUpdateNot
return {"errors": ["Note not found"]}
def delete(tenant_id, user_id, project_id, note_id):
def delete(project_id, note_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(""" UPDATE public.sessions_notes
SET deleted_at = timezone('utc'::text, now())
WHERE note_id = %(note_id)s
AND project_id = %(project_id)s
AND user_id = %(user_id)s
AND deleted_at ISNULL;""",
{"project_id": project_id, "user_id": user_id, "note_id": note_id})
{"project_id": project_id, "note_id": note_id})
)
return {"data": {"state": "success"}}

View file

@ -1,10 +1,7 @@
__author__ = "AZNAUROV David"
__maintainer__ = "KRAIEM Taha Yassine"
import logging
import schemas
from chalicelib.core import events, metadata, sessions
from chalicelib.core import events, metadata
from chalicelib.utils import sql_helper as sh
"""
@ -57,69 +54,61 @@ def get_stages_and_events(filter_d: schemas.CardSeriesFilterSchema, project_id)
op = sh.get_sql_operator(f.operator)
filter_type = f.type
# values[f_k] = sessions.__get_sql_value_multiple(f["value"])
f_k = f"f_value{i}"
values = {**values,
**sh.multi_values(helper.values_for_operator(value=f.value, op=f.operator),
value_key=f_k)}
**sh.multi_values(f.value, value_key=f_k)}
is_not = False
if sh.is_negation_operator(f.operator):
is_not = True
if filter_type == schemas.FilterType.user_browser:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f.value) > 0 and f.value[0] is not None:
first_stage_extra_constraints.append(f's.duration >= %(minDuration)s')
values["minDuration"] = f.value[0]
if len(f["value"]) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
first_stage_extra_constraints.append('s.duration <= %(maxDuration)s')
values["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
# events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)"
filter_extra_from = [f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"]
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f"p.base_referrer {op} %({f_k})s", f.value, value_key=f_k))
sh.multi_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
elif filter_type == events.EventType.METADATA.ui_type:
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
# op = sessions.__get_sql_operator(f["operator"])
if f.source in meta_keys.keys():
first_stage_extra_constraints.append(
sh.multi_conditions(
f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
value_key=f_k))
is_not=is_not, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, value_key=f_k))
sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
i = -1
for s in stages:
@ -553,35 +542,11 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
output = []
stages = filter_d.events
# TODO: handle 1 stage alone
if len(stages) == 0:
logging.debug("no stages found")
return output, 0
elif len(stages) == 1:
# TODO: count sessions, and users for single stage
output = [{
"type": stages[0].type,
"value": stages[0].value,
"dropPercentage": None,
"operator": stages[0].operator,
"sessionsCount": 0,
"dropPct": 0,
"usersCount": 0,
"dropDueToIssues": 0
}]
# original
# counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# first change
# counts = sessions.search_sessions(data=schemas.FlatSessionsSearchPayloadSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# last change
counts = sessions.search_sessions(data=schemas.SessionsSearchPayloadSchema.model_validate(filter_d),
project_id=project_id, user_id=None, count_only=True)
output[0]["sessionsCount"] = counts["countSessions"]
output[0]["usersCount"] = counts["countUsers"]
return output, 0
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
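
The funnel-stage hunk mainly threads the negation flag (`is_not`) through every `multi_conditions` call so that "is not" operators invert the generated SQL. The project's helper in `chalicelib.utils.sql_helper` is not shown here; the following is a hedged, simplified stand-in that illustrates the effect of the flag and may differ from the real implementation:

```python
# Simplified stand-in for sh.multi_conditions: expand one condition per value,
# OR them together, and wrap the group in NOT when is_not is set.
def multi_conditions(condition: str, values: list, is_not: bool = False,
                     value_key: str = "value") -> str:
    parts = [condition.replace(f"%({value_key})s", f"%({value_key}_{i})s")
             for i in range(len(values))]
    joined = "(" + " OR ".join(parts) + ")"
    return f"NOT {joined}" if is_not else joined

print(multi_conditions("s.user_browser ILIKE %(f_value0)s",
                       ["Chrome", "Firefox"], is_not=True, value_key="f_value0"))
# -> NOT (s.user_browser ILIKE %(f_value0_0)s OR s.user_browser ILIKE %(f_value0_1)s)
```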

View file

@ -249,7 +249,8 @@ def get_issue_title(issue_type):
'custom': "Custom Event",
'js_exception': "Error",
'custom_event_error': "Custom Error",
'js_error': "Error"}.get(issue_type, issue_type)
'js_error': "Error",
"mouse_thrashing": "Mouse Thrashing"}.get(issue_type, issue_type)
def __progress(old_val, new_val):

View file

@ -29,7 +29,7 @@ js_cache_bucket=
jwt_algorithm=HS512
JWT_EXPIRATION=6000
JWT_ISSUER=openReplay-dev
JWT_REFRESH_EXPIRATION=60
JWT_REFRESH_EXPIRATION=604800
JWT_REFRESH_SECRET=SECRET2
jwt_secret=SECRET
LOCAL_DEV=true
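
The refresh-token lifetime in this env file jumps from 60 to 604800, which (assuming the value is expressed in seconds, like the other JWT settings appear to be) is exactly one week:

```python
# 604800 seconds is seven days.
SECONDS_PER_DAY = 24 * 60 * 60          # 86400
assert 7 * SECONDS_PER_DAY == 604800
```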

View file

@ -481,8 +481,7 @@ def edit_note(projectId: int, noteId: int, data: schemas.SessionUpdateNoteSchema
@app.delete('/{projectId}/notes/{noteId}', tags=["sessions", "notes"])
def delete_note(projectId: int, noteId: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)):
data = sessions_notes.delete(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
note_id=noteId)
data = sessions_notes.delete(project_id=projectId, note_id=noteId)
return data

View file

@ -658,6 +658,18 @@ class SessionSearchFilterSchema(BaseModel):
_transform = model_validator(mode='before')(transform_old_filter_type)
_single_to_list_values = field_validator('value', mode='before')(single_to_list)
@model_validator(mode='before')
def _transform_data(cls, values):
if values.get("source") is not None:
if isinstance(values["source"], list):
if len(values["source"]) == 0:
values["source"] = None
elif len(values["source"]) == 1:
values["source"] = values["source"][0]
else:
raise ValueError(f"Unsupported multi-values source")
return values
@model_validator(mode='after')
def filter_validator(cls, values):
if values.type == FilterType.metadata:
@ -772,9 +784,12 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
@field_validator("filters", mode="after")
def merge_identical_filters(cls, values):
# ignore 'issue' type as it could be used for step-filters and tab-filters at the same time
i = 0
while i < len(values):
if values[i].is_event:
if values[i].is_event or values[i].type == FilterType.issue:
if values[i].type == FilterType.issue:
values[i] = remove_duplicate_values(values[i])
i += 1
continue
j = i + 1
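
The schema hunk adds a `mode='before'` validator that normalizes a list-valued `source` down to a single value (empty list becomes None, a single element is unwrapped, anything longer is rejected). A self-contained pydantic v2 sketch of the same normalization, on a hypothetical minimal model rather than the project's `SessionSearchFilterSchema`:

```python
# Hedged sketch of the before-validator pattern from the hunk (model is hypothetical).
from typing import Optional
from pydantic import BaseModel, model_validator

class FilterSketch(BaseModel):
    type: str
    source: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def _normalize_source(cls, values: dict) -> dict:
        src = values.get("source")
        if isinstance(src, list):
            if len(src) == 0:
                values["source"] = None
            elif len(src) == 1:
                values["source"] = src[0]
            else:
                raise ValueError("Unsupported multi-values source")
        return values

print(FilterSketch(type="metadata", source=["plan"]).source)   # -> plan
print(FilterSketch(type="metadata", source=[]).source)         # -> None
```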

View file

@ -9,19 +9,20 @@
# Helper function
exit_err() {
err_code=$1
if [[ $err_code != 0 ]]; then
exit "$err_code"
fi
err_code=$1
if [[ $err_code != 0 ]]; then
exit "$err_code"
fi
}
source ../scripts/lib/_docker.sh
app="assist-stats" # Set the app variable to "chalice"
app="assist-stats" # Set the app variable to "chalice"
environment=$1
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
chart="$app" # Use the app variable here
chart="$app" # Use the app variable here
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -32,31 +33,31 @@ check_prereq() {
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
[[ $ee == "true" ]] && return
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
[[ $ee == "true" ]] && return
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api(){
function build_api() {
destination="_assist_stats"
[[ $1 == "ee" ]] && {
destination="_assist_stats_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../assist-stats ../${destination}
cd ../${destination} || exit_err 100
@ -86,5 +87,5 @@ check_prereq
build_api $environment
echo buil_complete
if [[ $PATCH -eq 1 ]]; then
update_helm_release
update_helm_release
fi

View file

@ -1,5 +1,5 @@
#ARCH can be amd64 or arm64
ARG ARCH=amd64
ARG ARCH
FROM --platform=linux/$ARCH node:20-alpine
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
@ -22,4 +22,4 @@ USER 1001
ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-City.mmdb $MAXMINDDB_FILE
ENTRYPOINT ["/sbin/tini", "--"]
CMD npm start
CMD npm start

View file

@ -8,40 +8,43 @@
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
ARCH=${ARCH:-'amd64'}
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
}
source ../scripts/lib/_docker.sh
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api(){
function build_api() {
destination="_assist"
[[ $1 == "ee" ]] && {
destination="_assist_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../assist ../${destination}
cd ../${destination}
@ -50,7 +53,7 @@ function build_api(){
[[ $1 == "ee" ]] && {
cp -rf ../ee/assist/* ./
}
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist:${image_tag} .
docker build -f ./Dockerfile --platform linux/${ARCH} --build-arg ARCH=$ARCH --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist:${image_tag} .
cd ../assist
rm -rf ../${destination}
@ -68,5 +71,5 @@ function build_api(){
check_prereq
build_api $1
if [[ $PATCH -eq 1 ]]; then
update_helm_release assist
update_helm_release assist
fi

View file

@ -40,4 +40,4 @@ module.exports = {
socketsLiveByProject,
socketsLiveBySession
}
};
};

View file

@ -20,7 +20,7 @@ const extractTabId = (peerId) => {
const extractPeerId = (peerId) => {
let splited = peerId.split("-");
if (splited.length < 2 || splited.length > 3) {
debug && console.error(`cannot split peerId: ${peerId}`);
// debug && console.error(`cannot split peerId: ${peerId}`);
return {};
}
if (PROJECT_KEY_LENGTH > 0 && splited[0].length !== PROJECT_KEY_LENGTH) {
@ -64,38 +64,43 @@ const extractSessionIdFromRequest = function (req) {
}
const isValidSession = function (sessionInfo, filters) {
let foundAll = true;
for (const [key, body] of Object.entries(filters)) {
const result = {matched: false, filters: {}};
for (const [filterName, body] of Object.entries(filters)) { // range by filter names (key)
if (body.values === undefined || body.values === null) {
return result;
}
let found = false;
if (body.values !== undefined && body.values !== null) {
for (const [skey, svalue] of Object.entries(sessionInfo)) {
if (svalue !== undefined && svalue !== null) {
if (typeof (svalue) === "object") {
if (isValidSession(svalue, {[key]: body})) {
found = true;
break;
}
} else if (skey.toLowerCase() === key.toLowerCase()) {
for (let v of body["values"]) {
if (body.operator === "is" && v && String(svalue).toLowerCase() === String(v).toLowerCase()
|| body.operator !== "is" && String(svalue).toLowerCase().indexOf(String(v).toLowerCase()) >= 0) {
found = true;
break;
}
}
if (found) {
break;
}
for (const [sessKey, sessValue] of Object.entries(sessionInfo)) {
if (sessValue === undefined || sessValue === null) {
continue;
}
if (typeof (sessValue) === "object") {
const partRes = isValidSession(sessValue, {[filterName]: body})
if (partRes.matched) {
found = true;
Object.assign(result.filters, partRes.filters);
break;
}
} else if (sessKey.toLowerCase() === filterName.toLowerCase()) {
for (let v of body.values) {
if (body.operator === "is" && v && String(sessValue).toLowerCase() === String(v).toLowerCase()
|| body.operator !== "is" && String(sessValue).toLowerCase().indexOf(String(v).toLowerCase()) >= 0) {
found = true;
result.filters[filterName] = v;
break;
}
}
if (found) {
break;
}
}
}
foundAll = foundAll && found;
if (!found) {
break;
return result;
}
}
return foundAll;
result.matched = true;
return result;
}
const getValidAttributes = function (sessionInfo, query) {
@ -209,7 +214,7 @@ const getValue = function (obj, key) {
return undefined;
}
const sortPaginate = function (list, filters) {
const sortPaginate = function (list, filters, counter) {
if (typeof (list) === "object" && !Array.isArray(list)) {
for (const [key, value] of Object.entries(list)) {
list[key] = sortPaginate(value, filters);
@ -239,7 +244,7 @@ const sortPaginate = function (list, filters) {
list = list.slice((filters.pagination.page - 1) * filters.pagination.limit,
filters.pagination.page * filters.pagination.limit);
}
return {"total": total, "sessions": list};
return {"total": total, "sessions": list, "counter": counter};
}
const uniqueAutocomplete = function (list) {
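
The matcher above no longer returns a bare boolean: it walks the (possibly nested) session info and reports both whether every filter matched and which concrete value matched each filter name. A close Python rendering of that new contract, kept as an illustration of the logic rather than a drop-in replacement for the JavaScript helper:

```python
# Sketch of the new isValidSession contract: return {"matched": bool, "filters": {name: value}}.
def is_valid_session(session_info: dict, filters: dict) -> dict:
    result = {"matched": False, "filters": {}}
    for name, body in filters.items():
        values = body.get("values")
        if not values:
            return result
        found = False
        for key, val in session_info.items():
            if val is None:
                continue
            if isinstance(val, dict):                  # recurse into nested session info
                part = is_valid_session(val, {name: body})
                if part["matched"]:
                    found = True
                    result["filters"].update(part["filters"])
                    break
            elif key.lower() == name.lower():
                for v in values:
                    sval, fval = str(val).lower(), str(v).lower()
                    if (body.get("operator") == "is" and sval == fval) or \
                       (body.get("operator") != "is" and fval in sval):
                        found = True
                        result["filters"][name] = v
                        break
                if found:
                    break
        if not found:
            return result
    result["matched"] = True
    return result

info = {"userID": "alice@acme.com", "metadata": {"plan": "enterprise"}}
print(is_valid_session(info, {"userID": {"operator": "contains", "values": ["acme"]}}))
# -> matched=True, filters={'userID': 'acme'}
```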

View file

@ -15,16 +15,13 @@ const {
RecordRequestDuration,
IncreaseTotalRequests
} = require('../utils/metrics');
const {
GetRoomInfo,
GetRooms,
GetSessions,
} = require('../utils/rooms');
const {fetchSockets} = require("./wsServer");
const {IDENTITIES} = require("./assistHelper");
const debug_log = process.env.debug === "1";
const respond = function (req, res, data) {
console.log("responding with data: ", data)
console.log("responding with data: ", JSON.stringify(data))
let result = {data}
if (process.env.uws !== "true") {
res.statusCode = 200;
@ -38,37 +35,77 @@ const respond = function (req, res, data) {
RecordRequestDuration(req.method.toLowerCase(), res.handlerName, 200, duration/1000.0);
}
const getParticularSession = function (sessionId, filters) {
const sessInfo = GetRoomInfo(sessionId);
const getParticularSession = async function (roomId, filters) {
let connected_sockets = await fetchSockets(roomId);
if (connected_sockets.length === 0) {
return null;
}
let sessInfo;
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
sessInfo = item.handshake.query.sessionInfo;
break;
}
}
if (!sessInfo) {
return null;
}
if (!hasFilters(filters)) {
return sessInfo;
}
if (isValidSession(sessInfo, filters.filter)) {
const result = isValidSession(sessInfo, filters.filter)
if (result.matched) {
return sessInfo;
}
return null;
}
const getAllSessions = function (projectKey, filters, onlineOnly= false) {
const getAllSessions = async function (projectKey, filters, counters, onlineOnly= false) {
const sessions = [];
const allRooms = onlineOnly ? GetSessions(projectKey) : GetRooms(projectKey);
const connected_sockets = await fetchSockets();
if (connected_sockets.length === 0) {
return sessions;
}
for (let sessionId of allRooms) {
let sessInfo = GetRoomInfo(sessionId);
if (!sessInfo) {
const rooms = new Map();
for (let item of connected_sockets) {
// Prefilter checks
if (rooms.has(item.handshake.query.roomId)) {
continue;
}
if (item.handshake.query.projectKey !== projectKey || !item.handshake.query.sessionInfo) {
continue;
}
if (onlineOnly && item.handshake.query.identity !== IDENTITIES.session) {
continue;
}
// Mark this room as visited
rooms.set(item.handshake.query.roomId, true);
// Add session to the list without filtering
if (!hasFilters(filters)) {
sessions.push(sessInfo);
sessions.push(item.handshake.query.sessionInfo);
continue;
}
if (isValidSession(sessInfo, filters.filter)) {
sessions.push(sessInfo);
// Add session to the list if it passes the filter
const result = isValidSession(item.handshake.query.sessionInfo, filters.filter)
if (result.matched) {
sessions.push(item.handshake.query.sessionInfo);
// Add filter name/value to counter
for (const [filterName, filterValue] of Object.entries(result.filters)) {
if (counters[filterName] === undefined) {
counters[filterName] = {};
}
if (counters[filterName][filterValue] === undefined) {
counters[filterName][filterValue] = 0;
}
counters[filterName][filterValue] += 1;
}
}
}
return sessions
}
@ -83,14 +120,16 @@ const socketsListByProject = async function (req, res) {
// find a particular session
if (_sessionId) {
return respond(req, res, getParticularSession(_sessionId, filters));
const sessInfo = await getParticularSession(`${_projectKey}-${_sessionId}`, filters);
return respond(req, res, sessInfo);
}
// find all sessions for a project
const sessions = getAllSessions(_projectKey, filters);
const counters = {};
const sessions = await getAllSessions(_projectKey, filters, counters);
// send response
respond(req, res, sortPaginate(sessions, filters));
respond(req, res, sortPaginate(sessions, filters, counters));
}
// Sort by projectKey
@ -104,14 +143,16 @@ const socketsLiveByProject = async function (req, res) {
// find a particular session
if (_sessionId) {
return respond(req, res, getParticularSession(_sessionId, filters));
let sessInfo = await getParticularSession(`${_projectKey}-${_sessionId}`, filters);
return respond(req, res, sessInfo);
}
// find all sessions for a project
const sessions = getAllSessions(_projectKey, filters, true);
const counters = {};
const sessions = await getAllSessions(_projectKey, filters, counters, true);
// send response
respond(req, res, sortPaginate(sessions, filters));
respond(req, res, sortPaginate(sessions, filters, counters));
}
// Sort by roomID (projectKey+sessionId)
@ -119,12 +160,14 @@ const socketsLiveBySession = async function (req, res) {
debug_log && console.log("[WS]looking for LIVE session");
res.handlerName = 'socketsLiveBySession';
const _projectKey = extractProjectKeyFromRequest(req);
const _sessionId = extractSessionIdFromRequest(req);
const filters = await extractPayloadFromRequest(req, res);
// find a particular session
if (_sessionId) {
return respond(req, res, getParticularSession(_sessionId, filters));
let sessInfo = await getParticularSession(`${_projectKey}-${_sessionId}`, filters);
return respond(req, res, sessInfo);
}
return respond(req, res, null);
}
@ -140,14 +183,27 @@ const autocomplete = async function (req, res) {
if (!hasQuery(filters)) {
return respond(req, res, results);
}
let allSessions = GetSessions(_projectKey);
for (let sessionId of allSessions) {
let sessInfo = GetRoomInfo(sessionId);
if (!sessInfo) {
let connected_sockets = await fetchSockets();
if (connected_sockets.length === 0) {
return results;
}
const rooms = new Map();
for (let item of connected_sockets) {
if (rooms.has(item.handshake.query.roomId)) {
continue;
}
results = [...results, ...getValidAttributes(sessInfo, filters.query)];
if (item.handshake.query.sessionInfo) {
if ((item.handshake.query.projectKey !== _projectKey) || (item.handshake.query.identity !== IDENTITIES.session)) {
continue;
}
// Mark this room as visited
rooms.set(item.handshake.query.roomId, true);
results.push(...getValidAttributes(item.handshake.query.sessionInfo, filters.query))
}
}
respond(req, res, uniqueAutocomplete(results));
}
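
When listing sessions, each matched filter name/value pair now increments a nested counter that is returned alongside the paginated list. A small sketch of that accumulation step, assuming the `{"matched": ..., "filters": ...}` result shape illustrated earlier:

```python
# Sketch: accumulate per-filter, per-value match counts across sessions,
# i.e. counters[filter_name][matched_value] += 1, creating entries lazily.
def accumulate(counters: dict, matched_filters: dict) -> None:
    for name, value in matched_filters.items():
        per_value = counters.setdefault(name, {})
        per_value[value] = per_value.get(value, 0) + 1

counters: dict = {}
for matched in ({"userID": "acme"}, {"userID": "acme"}, {"country": "DE"}):
    accumulate(counters, matched)
print(counters)   # {'userID': {'acme': 2}, 'country': {'DE': 1}}
```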

View file

@ -1,49 +0,0 @@
const roomsInfo = new Map(); // sessionID -> sessionInfo
const projectSessions = new Map(); // projectKey -> Set(sessionIDs) // all rooms (even with agent only)
const projectRooms = new Map(); // projectKey -> Set(roomIDs) // online rooms
function AddRoom(projKey, sessID, sessInfo) {
roomsInfo.set(sessID, sessInfo);
if (!projectRooms.has(projKey)) {
projectRooms.set(projKey, new Set());
}
projectRooms.get(projKey).add(sessID);
if (!projectSessions.has(projKey)) {
projectSessions.set(projKey, new Set());
}
projectSessions.get(projKey).add(sessID);
}
function UpdateRoom(sessID, sessInfo) {
roomsInfo.set(sessID, sessInfo);
}
function DeleteSession(projKey, sessID) {
projectSessions.get(projKey)?.delete(sessID);
}
function DeleteRoom(projKey, sessID) {
projectRooms.get(projKey)?.delete(sessID);
}
function GetRoomInfo(sessID) {
return roomsInfo.get(sessID);
}
function GetRooms(projectKey) {
return projectRooms.get(projectKey) || new Set();
}
function GetSessions(projectKey) {
return projectSessions.get(projectKey) || new Set();
}
module.exports = {
AddRoom,
UpdateRoom,
DeleteRoom,
DeleteSession,
GetRoomInfo,
GetRooms,
GetSessions,
}

View file

@ -23,12 +23,6 @@ const {
IncreaseOnlineRooms,
DecreaseOnlineRooms,
} = require('../utils/metrics');
const {
AddRoom,
UpdateRoom,
DeleteRoom,
DeleteSession,
} = require('../utils/rooms');
const debug_log = process.env.debug === "1";
const error_log = process.env.ERROR === "1";
@ -36,12 +30,12 @@ const error_log = process.env.ERROR === "1";
const findSessionSocketId = async (io, roomId, tabId) => {
let pickFirstSession = tabId === undefined;
const connected_sockets = await io.in(roomId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session) {
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
if (pickFirstSession) {
return item.id;
} else if (item.tabId === tabId) {
return item.id;
return socket.id;
} else if (socket.handshake.query.tabId === tabId) {
return socket.id;
}
}
}
@ -52,13 +46,13 @@ async function getRoomData(io, roomID) {
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
const connected_sockets = await io.in(roomID).fetchSockets();
if (connected_sockets.length > 0) {
for (let sock of connected_sockets) {
if (sock.handshake.query.identity === IDENTITIES.session) {
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
tabsCount++;
tabIDs.push(sock.tabId);
tabIDs.push(socket.handshake.query.tabId);
} else {
agentsCount++;
agentIDs.push(sock.id);
agentIDs.push(socket.id);
}
}
} else {
@ -70,32 +64,29 @@ async function getRoomData(io, roomID) {
function processNewSocket(socket) {
socket._connectedAt = new Date();
socket.identity = socket.handshake.query.identity;
socket.peerId = socket.handshake.query.peerId;
let {projectKey: connProjectKey, sessionId: connSessionId, tabId: connTabId} = extractPeerId(socket.peerId);
socket.roomId = `${connProjectKey}-${connSessionId}`;
socket.projectId = socket.handshake.query.projectId;
socket.projectKey = connProjectKey;
socket.sessId = connSessionId;
socket.tabId = connTabId;
debug_log && console.log(`connProjectKey:${connProjectKey}, connSessionId:${connSessionId}, connTabId:${connTabId}, roomId:${socket.roomId}`);
let {projectKey: connProjectKey, sessionId: connSessionId, tabId: connTabId} = extractPeerId(socket.handshake.query.peerId);
socket.handshake.query.roomId = `${connProjectKey}-${connSessionId}`;
socket.handshake.query.projectKey = connProjectKey;
socket.handshake.query.sessId = connSessionId;
socket.handshake.query.tabId = connTabId;
debug_log && console.log(`connProjectKey:${connProjectKey}, connSessionId:${connSessionId}, connTabId:${connTabId}, roomId:${socket.handshake.query.roomId}`);
}
async function onConnect(socket) {
debug_log && console.log(`WS started:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`);
processNewSocket(socket);
IncreaseTotalWSConnections(socket.identity);
IncreaseOnlineConnections(socket.identity);
IncreaseTotalWSConnections(socket.handshake.query.identity);
IncreaseOnlineConnections(socket.handshake.query.identity);
const io = getServer();
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.roomId);
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId);
if (socket.identity === IDENTITIES.session) {
if (socket.handshake.query.identity === IDENTITIES.session) {
// Check if session with the same tabID already connected, if so, refuse new connexion
if (tabsCount > 0) {
for (let tab of tabIDs) {
if (tab === socket.tabId) {
error_log && console.log(`session already connected, refusing new connexion, peerId: ${socket.peerId}`);
if (tab === socket.handshake.query.tabId) {
error_log && console.log(`session already connected, refusing new connexion, peerId: ${socket.handshake.query.peerId}`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.SESSION_ALREADY_CONNECTED);
return socket.disconnect();
}
@ -106,35 +97,34 @@ async function onConnect(socket) {
// New session creates new room
IncreaseTotalRooms();
IncreaseOnlineRooms();
AddRoom(socket.projectKey, socket.sessId, socket.handshake.query.sessionInfo);
}
// Inform all connected agents about reconnected session
if (agentsCount > 0) {
debug_log && console.log(`notifying new session about agent-existence`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
socket.to(socket.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
}
} else if (tabsCount <= 0) {
debug_log && console.log(`notifying new agent about no SESSIONS with peerId:${socket.peerId}`);
debug_log && console.log(`notifying new agent about no SESSIONS with peerId:${socket.handshake.query.peerId}`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
}
await socket.join(socket.roomId);
await socket.join(socket.handshake.query.roomId);
if (debug_log) {
let connectedSockets = await io.in(socket.roomId).fetchSockets();
let connectedSockets = await io.in(socket.handshake.query.roomId).fetchSockets();
if (connectedSockets.length > 0) {
console.log(`${socket.id} joined room:${socket.roomId}, as:${socket.identity}, members:${connectedSockets.length}`);
console.log(`${socket.id} joined room:${socket.handshake.query.roomId}, as:${socket.handshake.query.identity}, members:${connectedSockets.length}`);
}
}
if (socket.identity === IDENTITIES.agent) {
if (socket.handshake.query.identity === IDENTITIES.agent) {
if (socket.handshake.query.agentInfo !== undefined) {
socket.handshake.query.agentInfo = JSON.parse(socket.handshake.query.agentInfo);
socket.agentID = socket.handshake.query.agentInfo.id;
socket.handshake.query.agentID = socket.handshake.query.agentInfo.id;
// Stats
startAssist(socket, socket.agentID);
startAssist(socket, socket.handshake.query.agentID);
}
socket.to(socket.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
}
// Set disconnect handler
@ -153,50 +143,46 @@ async function onConnect(socket) {
}
async function onDisconnect(socket) {
DecreaseOnlineConnections(socket.identity);
debug_log && console.log(`${socket.id} disconnected from ${socket.roomId}`);
DecreaseOnlineConnections(socket.handshake.query.identity);
debug_log && console.log(`${socket.id} disconnected from ${socket.handshake.query.roomId}`);
if (socket.identity === IDENTITIES.agent) {
socket.to(socket.roomId).emit(EVENTS_DEFINITION.emit.AGENT_DISCONNECT, socket.id);
if (socket.handshake.query.identity === IDENTITIES.agent) {
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.AGENT_DISCONNECT, socket.id);
// Stats
endAssist(socket, socket.agentID);
endAssist(socket, socket.handshake.query.agentID);
}
debug_log && console.log("checking for number of connected agents and sessions");
const io = getServer();
let {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.roomId);
let {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId);
if (tabsCount === -1 && agentsCount === -1) {
DecreaseOnlineRooms();
debug_log && console.log(`room not found: ${socket.roomId}`);
DeleteSession(socket.projectKey, socket.sessId);
DeleteRoom(socket.projectKey, socket.sessId);
debug_log && console.log(`room not found: ${socket.handshake.query.roomId}`);
return;
}
if (tabsCount === 0) {
debug_log && console.log(`notifying everyone in ${socket.roomId} about no SESSIONS`);
socket.to(socket.roomId).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
DeleteSession(socket.projectKey, socket.sessId);
debug_log && console.log(`notifying everyone in ${socket.handshake.query.roomId} about no SESSIONS`);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
}
if (agentsCount === 0) {
debug_log && console.log(`notifying everyone in ${socket.roomId} about no AGENTS`);
socket.to(socket.roomId).emit(EVENTS_DEFINITION.emit.NO_AGENTS);
debug_log && console.log(`notifying everyone in ${socket.handshake.query.roomId} about no AGENTS`);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NO_AGENTS);
}
}
async function onUpdateEvent(socket, ...args) {
debug_log && console.log(`${socket.id} sent update event.`);
if (socket.identity !== IDENTITIES.session) {
if (socket.handshake.query.identity !== IDENTITIES.session) {
debug_log && console.log('Ignoring update event.');
return
}
args[0] = updateSessionData(socket, args[0])
Object.assign(socket.handshake.query.sessionInfo, args[0].data, {tabId: args[0]?.meta?.tabId});
UpdateRoom(socket.sessId, socket.handshake.query.sessionInfo);
// Update sessionInfo for all agents in the room
const io = getServer();
const connected_sockets = await io.in(socket.roomId).fetchSockets();
const connected_sockets = await io.in(socket.handshake.query.roomId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
Object.assign(item.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
@ -212,17 +198,17 @@ async function onAny(socket, eventName, ...args) {
return
}
args[0] = updateSessionData(socket, args[0])
if (socket.identity === IDENTITIES.session) {
debug_log && console.log(`received event:${eventName}, from:${socket.identity}, sending message to room:${socket.roomId}`);
socket.to(socket.roomId).emit(eventName, args[0]);
if (socket.handshake.query.identity === IDENTITIES.session) {
debug_log && console.log(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to room:${socket.handshake.query.roomId}`);
socket.to(socket.handshake.query.roomId).emit(eventName, args[0]);
} else {
// Stats
handleEvent(eventName, socket, args[0]);
debug_log && console.log(`received event:${eventName}, from:${socket.identity}, sending message to session of room:${socket.roomId}`);
debug_log && console.log(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to session of room:${socket.handshake.query.roomId}`);
const io = getServer();
let socketId = await findSessionSocketId(io, socket.roomId, args[0]?.meta?.tabId);
let socketId = await findSessionSocketId(io, socket.handshake.query.roomId, args[0]?.meta?.tabId);
if (socketId === null) {
debug_log && console.log(`session not found for:${socket.roomId}`);
debug_log && console.log(`session not found for:${socket.handshake.query.roomId}`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.NO_SESSIONS);
} else {
debug_log && console.log("message sent");
@ -233,8 +219,8 @@ async function onAny(socket, eventName, ...args) {
// Back compatibility (add top layer with meta information)
function updateSessionData(socket, sessionData) {
if (sessionData?.meta === undefined && socket.identity === IDENTITIES.session) {
sessionData = {meta: {tabId: socket.tabId, version: 1}, data: sessionData};
if (sessionData?.meta === undefined && socket.handshake.query.identity === IDENTITIES.session) {
sessionData = {meta: {tabId: socket.handshake.query.tabId, version: 1}, data: sessionData};
}
return sessionData
}

View file

@ -7,6 +7,16 @@ const getServer = function () {
return io;
}
const fetchSockets = async function (roomID) {
if (!io) {
return [];
}
if (!roomID) {
return await io.fetchSockets();
}
return await io.in(roomID).fetchSockets();
}
const createSocketIOServer = function (server, prefix) {
if (io) {
return io;
@ -26,4 +36,5 @@ const createSocketIOServer = function (server, prefix) {
module.exports = {
createSocketIOServer,
getServer,
fetchSockets,
}

View file

@ -1,15 +1,8 @@
# GSSAPI = true to enable Kerberos auth for Kafka and manually build librdkafka with GSSAPI support
ARG GSSAPI=false
#ARCH can be amd64 or arm64
ARG ARCH=amd64
FROM --platform=linux/$ARCH golang:1.21-alpine3.18 AS build
RUN if [ "$GSSAPI" = "true" ]; then \
apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5; \
else \
apk add --no-cache gcc g++ make libc-dev; \
fi
RUN apk add --no-cache --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5;
WORKDIR /root
# Load code dependencies
@ -24,11 +17,7 @@ COPY internal internal
# Build service
ARG SERVICE_NAME
RUN if [ "$GSSAPI" = "true" ]; then \
CGO_ENABLED=1 GOOS=linux GOARCH=$ARCH go build -o service -tags dynamic openreplay/backend/cmd/$SERVICE_NAME; \
else \
CGO_ENABLED=1 GOOS=linux GOARCH=$ARCH go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME; \
fi
RUN CGO_ENABLED=1 GOOS=linux GOARCH=$ARCH go build -o service -tags dynamic openreplay/backend/cmd/$SERVICE_NAME
FROM --platform=linux/$ARCH alpine AS entrypoint
ARG GIT_SHA
@ -36,11 +25,7 @@ ARG GSSAPI=false
LABEL GIT_SHA=$GIT_SHA
LABEL GSSAPI=$GSSAPI
RUN if [ "$GSSAPI" = "true" ]; then \
apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5; \
else \
apk add --no-cache ca-certificates cyrus-sasl cyrus-sasl-gssapiv2 krb5; \
fi
RUN apk add --no-cache --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
RUN adduser -u 1001 openreplay -D
ARG SERVICE_NAME

View file

@ -9,86 +9,88 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
set -e
GIT_ROOT=$(git rev-parse --show-toplevel)
source $GIT_ROOT/scripts/lib/_docker.sh
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
ee="false"
# Possible values: amd64, arm64
arch="${ARCH:-"amd64"}"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
return
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
return
}
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
chart=$2
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
chart=$2
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_service() {
image="$1"
echo "BUILDING $image"
docker build -t ${DOCKER_REPO:-'local'}/$image:${image_tag} --platform linux/$arch --build-arg ARCH=$arch --build-arg SERVICE_NAME=$image --build-arg GIT_SHA=$git_sha --build-arg GSSAPI=${GSSAPI:-'false'} .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
echo "Build completed for $image"
return
image="$1"
echo "BUILDING $image"
docker build -t ${DOCKER_REPO:-'local'}/$image:${image_tag} --platform linux/$arch --build-arg ARCH=$arch --build-arg SERVICE_NAME=$image --build-arg GIT_SHA=$git_sha .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
echo "Build completed for $image"
return
}
function build_api() {
destination="_backend"
[[ $1 == "ee" ]] && {
destination="_backend_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../backend ../${destination}
cd ../${destination}
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -r ../ee/backend/* ./
ee="true"
}
[[ $2 != "" ]] && {
build_service $2
[[ $PATCH -eq 1 ]] && update_helm_release $2
cd ../backend
rm -rf ../${destination}
return
}
for image in $(ls cmd); do
build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${image_tag}"
[[ $PATCH -eq 1 ]] && update_helm_release $image
done
cd ../backend
rm -rf ../${destination}
echo "backend build completed"
destination="_backend"
[[ $1 == "ee" ]] && {
destination="_backend_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../backend ../${destination}
cd ../${destination}
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -r ../ee/backend/* ./
ee="true"
}
[[ $2 != "" ]] && {
build_service $2
[[ $PATCH -eq 1 ]] && update_helm_release $2
cd ../backend
rm -rf ../${destination}
return
}
for image in $(ls cmd); do
build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${image_tag}"
[[ $PATCH -eq 1 ]] && update_helm_release $image
done
cd ../backend
rm -rf ../${destination}
echo "backend build completed"
}
check_prereq
build_api $1 $2
build_api "$1" "$2"

View file

@ -1,9 +1,12 @@
module openreplay/backend
go 1.20
go 1.21
toolchain go1.22.2
require (
cloud.google.com/go/logging v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
github.com/ClickHouse/clickhouse-go/v2 v2.2.0
github.com/Masterminds/semver v1.5.0
@ -12,6 +15,7 @@ require (
github.com/btcsuite/btcutil v1.0.2
github.com/confluentinc/confluent-kafka-go/v2 v2.2.0
github.com/elastic/go-elasticsearch/v7 v7.13.1
github.com/elastic/go-elasticsearch/v8 v8.13.1
github.com/go-redis/redis v6.15.9+incompatible
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
@ -28,6 +32,7 @@ require (
github.com/sethvargo/go-envconfig v0.7.0
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
go.uber.org/zap v1.17.0
golang.org/x/net v0.17.0
google.golang.org/api v0.126.0
)
@ -38,14 +43,16 @@ require (
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.1 // indirect
cloud.google.com/go/longrunning v0.5.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
github.com/googleapis/gax-go/v2 v2.11.0 // indirect
@ -64,12 +71,15 @@ require (
github.com/prometheus/procfs v0.7.3 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.7.0 // indirect
go.opentelemetry.io/otel/trace v1.7.0 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect

View file

@ -528,6 +528,7 @@ cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
@ -623,6 +624,7 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
@ -920,6 +922,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
@ -945,8 +948,12 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0=
github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk=
github.com/elastic/go-elasticsearch/v7 v7.13.1 h1:PaM3V69wPlnwR+ne50rSKKn0RNDYnnOFQcuGEI0ce80=
github.com/elastic/go-elasticsearch/v7 v7.13.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg=
github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@ -1010,7 +1017,10 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@ -1057,6 +1067,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@ -1117,8 +1128,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -1353,6 +1365,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@ -1531,6 +1544,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -1670,8 +1684,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@ -1751,35 +1766,44 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -2134,8 +2158,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

View file

@ -104,7 +104,7 @@ func (c *cacher) cacheURL(t *Task) {
start := time.Now()
req, _ := http.NewRequest("GET", t.requestURL, nil)
if t.retries%2 == 0 {
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0")
}
for k, v := range c.requestHeaders {
req.Header.Set(k, v)
@ -142,6 +142,7 @@ func (c *cacher) cacheURL(t *Task) {
if contentType == "" {
contentType = mime.TypeByExtension(filepath.Ext(res.Request.URL.Path))
}
contentEncoding := res.Header.Get("Content-Encoding")
// Skip html file (usually it's a CDN mock for 404 error)
if strings.HasPrefix(contentType, "text/html") {
@ -158,7 +159,7 @@ func (c *cacher) cacheURL(t *Task) {
// TODO: implement in streams
start = time.Now()
err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, objectstorage.NoCompression)
err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
if err != nil {
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
c.Errors <- errors.Wrap(err, t.urlContext)

View file

@ -48,6 +48,7 @@ type Redshift struct {
User string `env:"REDSHIFT_USER"`
Password string `env:"REDSHIFT_PASSWORD"`
Database string `env:"REDSHIFT_DATABASE"`
Bucket string `env:"REDSHIFT_BUCKET,default=rdshftbucket"`
}
// Clickhouse config
@ -58,3 +59,16 @@ type Clickhouse struct {
UserName string `env:"CLICKHOUSE_USERNAME,default=default"`
Password string `env:"CLICKHOUSE_PASSWORD,default="`
}
// ElasticSearch config
type ElasticSearch struct {
URLs string `env:"ELASTICSEARCH_URLS"`
UseAWS bool `env:"ELASTICSEARCH_IN_AWS,default=false"`
User string `env:"ELASTICSEARCH_USER"`
Password string `env:"ELASTICSEARCH_PASSWORD"`
}
func (cfg *ElasticSearch) GetURLs() []string {
return strings.Split(cfg.URLs, ",")
}
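The new ElasticSearch config exposes the comma-separated ELASTICSEARCH_URLS value through GetURLs. A minimal sketch of how these values could feed the go-elasticsearch/v8 client that now appears in the dependency list; the package and function names here are assumptions, only elasticsearch.Config and its Addresses/Username/Password fields are the library's real API.

package connector

import (
	elasticsearch "github.com/elastic/go-elasticsearch/v8"
)

// newESClient is a sketch, not code from this change: it wires the split URL
// list plus basic auth into the v8 client.
func newESClient(urls []string, user, password string) (*elasticsearch.Client, error) {
	return elasticsearch.NewClient(elasticsearch.Config{
		Addresses: urls, // e.g. cfg.GetURLs() from the ElasticSearch struct above
		Username:  user,
		Password:  password,
	})
}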

View file

@ -5,6 +5,8 @@ import (
"openreplay/backend/internal/config/configurator"
"openreplay/backend/internal/config/objectstorage"
"openreplay/backend/internal/config/redis"
"strconv"
"strings"
"time"
)
@ -24,6 +26,7 @@ type Config struct {
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
CommitBatchTimeout time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=5s"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
ProjectIDs string `env:"PROJECT_IDS"`
}
func New() *Config {
@ -31,3 +34,22 @@ func New() *Config {
configurator.Process(cfg)
return cfg
}
func (c *Config) GetAllowedProjectIDs() []int {
stringIDs := strings.Split(c.ProjectIDs, ",")
if len(stringIDs) == 0 {
return nil
}
ids := make([]int, 0, len(stringIDs))
for _, id := range stringIDs {
intID, err := strconv.Atoi(id)
if err != nil {
continue
}
ids = append(ids, intID)
}
if len(ids) == 0 {
return nil
}
return ids
}
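GetAllowedProjectIDs turns the optional comma-separated PROJECT_IDS variable into a whitelist, returning nil (no restriction) when the variable is unset or contains no valid integers. A small sketch of how a consumer might apply it; the helper name and the usage line are illustrative, not part of this change.

// allowedFilter is a sketch only: a nil list accepts every project,
// otherwise membership in the parsed PROJECT_IDS list is required.
func allowedFilter(ids []int) func(projectID int) bool {
	if ids == nil {
		return func(int) bool { return true }
	}
	set := make(map[int]struct{}, len(ids))
	for _, id := range ids {
		set[id] = struct{}{}
	}
	return func(projectID int) bool {
		_, ok := set[projectID]
		return ok
	}
}

// e.g. keep := allowedFilter(cfg.GetAllowedProjectIDs()); if !keep(projectID) { /* skip message */ }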

View file

@ -13,6 +13,8 @@ type ObjectsConfig struct {
AWSSkipSSLValidation bool `env:"AWS_SKIP_SSL_VALIDATION"`
AzureAccountName string `env:"AZURE_ACCOUNT_NAME"`
AzureAccountKey string `env:"AZURE_ACCOUNT_KEY"`
UseS3Tags bool `env:"USE_S3_TAGS,default=true"`
AWSIAMRole string `env:"AWS_IAM_ROLE"`
}
func (c *ObjectsConfig) UseFileTags() bool {

View file

@ -23,7 +23,7 @@ type Config struct {
MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"`
UseSort bool `env:"USE_SESSION_SORT,default=true"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
CompressionAlgo string `env:"COMPRESSION_ALGO,default=gzip"` // none, gzip, brotli, zstd
CompressionAlgo string `env:"COMPRESSION_ALGO,default=zstd"` // none, gzip, brotli, zstd
}
func New() *Config {

View file

@ -373,7 +373,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", task.compression); err != nil {
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", "", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDoms = time.Now().Sub(start).Milliseconds()
@ -386,7 +386,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", task.compression); err != nil {
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", "", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDome = time.Now().Sub(start).Milliseconds()
@ -399,7 +399,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", task.compression); err != nil {
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", "", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDev = time.Now().Sub(start).Milliseconds()

View file

@ -125,7 +125,7 @@ func (v *VideoStorage) sendToS3(task *Task) {
} else {
key += "/replay.mp4"
}
if err := v.objStorage.Upload(bytes.NewReader(video), key, "video/mp4", objectstorage.NoCompression); err != nil {
if err := v.objStorage.Upload(bytes.NewReader(video), key, "video/mp4", "", objectstorage.NoCompression); err != nil {
log.Fatalf("Storage: start upload video replay failed. %s", err)
}
log.Printf("Video file (size: %d) uploaded successfully in %v", len(video), time.Since(start))

View file

@ -119,7 +119,11 @@ func (conn *Conn) InsertWebPageEvent(sess *sessions.Session, e *messages.PageEve
log.Printf("insert web page event in bulk err: %s", err)
}
// Add new value set to autocomplete bulk
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "LOCATION", url.DiscardURLQuery(path))
location := path
if query != "" {
location += "?" + query
}
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "LOCATION", location)
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "REFERRER", url.DiscardURLQuery(e.Referrer))
return nil
}
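This keeps the query string in the LOCATION autocomplete value, in line with the commit that puts path+query into the url_path column. A tiny illustration with made-up values:

// Illustrative values only.
path, query := "/checkout", "step=2"
location := path
if query != "" {
	location += "?" + query
}
// location == "/checkout?step=2" is now stored, where previously the
// query-stripped "/checkout" would have been used.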

View file

@ -0,0 +1,73 @@
package logger
import (
"context"
"fmt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
)
type Logger interface {
Debug(ctx context.Context, message string, args ...interface{})
Info(ctx context.Context, message string, args ...interface{})
Warn(ctx context.Context, message string, args ...interface{})
Error(ctx context.Context, message string, args ...interface{})
Fatal(ctx context.Context, message string, args ...interface{})
}
type loggerImpl struct {
l *zap.Logger
}
func New() Logger {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), zap.InfoLevel)
baseLogger := zap.New(core, zap.AddCaller())
logger := baseLogger.WithOptions(zap.AddCallerSkip(1))
return &loggerImpl{l: logger}
}
func (l *loggerImpl) prepare(ctx context.Context, logger *zap.Logger) *zap.Logger {
if sID, ok := ctx.Value("sessionID").(string); ok {
logger = logger.With(zap.String("sessionID", sID))
}
if pID, ok := ctx.Value("projectID").(string); ok {
logger = logger.With(zap.String("projectID", pID))
}
if tVer, ok := ctx.Value("tracker").(string); ok {
logger = logger.With(zap.String("tracker", tVer))
}
if httpMethod, ok := ctx.Value("httpMethod").(string); ok {
logger = logger.With(zap.String("httpMethod", httpMethod))
}
if urlPath, ok := ctx.Value("url").(string); ok {
logger = logger.With(zap.String("url", urlPath))
}
if batch, ok := ctx.Value("batch").(string); ok {
logger = logger.With(zap.String("batch", batch))
}
return logger
}
func (l *loggerImpl) Debug(ctx context.Context, message string, args ...interface{}) {
l.prepare(ctx, l.l.With(zap.String("level", "debug"))).Debug(fmt.Sprintf(message, args...))
}
func (l *loggerImpl) Info(ctx context.Context, message string, args ...interface{}) {
l.prepare(ctx, l.l.With(zap.String("level", "info"))).Info(fmt.Sprintf(message, args...))
}
func (l *loggerImpl) Warn(ctx context.Context, message string, args ...interface{}) {
l.prepare(ctx, l.l.With(zap.String("level", "warn"))).Warn(fmt.Sprintf(message, args...))
}
func (l *loggerImpl) Error(ctx context.Context, message string, args ...interface{}) {
l.prepare(ctx, l.l.With(zap.String("level", "error"))).Error(fmt.Sprintf(message, args...))
}
func (l *loggerImpl) Fatal(ctx context.Context, message string, args ...interface{}) {
l.prepare(ctx, l.l.With(zap.String("level", "fatal"))).Fatal(fmt.Sprintf(message, args...))
}
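The logger enriches every entry with request-scoped fields that prepare reads from plain string context keys. A short usage sketch; the import path is an assumption, and the context keys are the ones checked above.

package main

import (
	"context"

	"openreplay/backend/pkg/logger" // assumed import path for the package above
)

func main() {
	log := logger.New()

	// prepare() looks up plain string keys, so the values must be strings.
	ctx := context.WithValue(context.Background(), "sessionID", "1234567890")
	ctx = context.WithValue(ctx, "projectID", "42")

	// Emits one JSON line with timestamp, caller, level, sessionID and projectID.
	log.Info(ctx, "processed %d messages in %s", 128, "1.2s")
}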

View file

@ -15,7 +15,7 @@ const (
)
type ObjectStorage interface {
Upload(reader io.Reader, key string, contentType string, compression CompressionType) error
Upload(reader io.Reader, key string, contentType, contentEncoding string, compression CompressionType) error
Get(key string) (io.ReadCloser, error)
Exists(key string) bool
GetCreationTime(key string) *time.Time
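The Upload signature gains a contentEncoding argument. As the hunks above show, the assets cacher forwards the origin's Content-Encoding header, while the session and video uploaders pass an empty string so the encoding is still derived from the compression argument. A condensed sketch of that convention (the helper names are hypothetical):

// Proxied assets keep whatever encoding the origin served.
func uploadAsset(store ObjectStorage, body io.Reader, key, contentType, originEncoding string) error {
	return store.Upload(body, key, contentType, originEncoding, NoCompression)
}

// Session and video files pass "", letting the compression type decide.
func uploadSessionFile(store ObjectStorage, body io.Reader, key string, compression CompressionType) error {
	return store.Upload(body, key, "application/octet-stream", "", compression)
}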

View file

@ -6,7 +6,6 @@ import (
"io"
"log"
"net/http"
"net/url"
"os"
"sort"
"strconv"
@ -28,7 +27,7 @@ type storageImpl struct {
uploader *s3manager.Uploader
svc *s3.S3
bucket *string
fileTag string
fileTag *string
}
func NewS3(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
@ -60,27 +59,26 @@ func NewS3(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
uploader: s3manager.NewUploader(sess),
svc: s3.New(sess), // AWS Docs: "These clients are safe to use concurrently."
bucket: &cfg.BucketName,
fileTag: loadFileTag(),
fileTag: tagging(cfg.UseS3Tags),
}, nil
}
func (s *storageImpl) tagging() *string {
return &s.fileTag
}
func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, compression objectstorage.CompressionType) error {
func (s *storageImpl) Upload(reader io.Reader, key string, contentType, contentEncoding string, compression objectstorage.CompressionType) error {
cacheControl := "max-age=2628000, immutable, private"
var contentEncoding *string
var encoding *string
switch compression {
case objectstorage.Gzip:
encodeStr := "gzip"
contentEncoding = &encodeStr
encoding = &encodeStr
case objectstorage.Brotli:
encodeStr := "br"
contentEncoding = &encodeStr
encoding = &encodeStr
case objectstorage.Zstd:
// Have to ignore contentEncoding for Zstd (otherwise will be an error in browser)
}
if contentEncoding != "" {
encoding = &contentEncoding
}
_, err := s.uploader.Upload(&s3manager.UploadInput{
Body: reader,
@ -88,8 +86,8 @@ func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, c
Key: &key,
ContentType: &contentType,
CacheControl: &cacheControl,
ContentEncoding: contentEncoding,
Tagging: s.tagging(),
ContentEncoding: encoding,
Tagging: s.fileTag,
})
return err
}
@ -207,16 +205,3 @@ func (s *storageImpl) GetPreSignedUploadUrl(key string) (string, error) {
}
return urlStr, nil
}
func loadFileTag() string {
// Load file tag from env
key := "retention"
value := os.Getenv("RETENTION")
if value == "" {
value = "default"
}
// Create URL encoded tag set for file
params := url.Values{}
params.Add(key, value)
return params.Encode()
}

View file

@ -0,0 +1,5 @@
package s3
func tagging(useTags bool) *string {
return nil
}
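In this build tagging always returns nil, so uploads carry no S3 object tags regardless of USE_S3_TAGS. A sketch of what an enabled variant could look like, reconstructed only from the removed loadFileTag above (a single "retention" tag taken from the RETENTION env var); the actual enterprise implementation is not shown in this diff.

package s3

import (
	"net/url"
	"os"
)

// tagging sketch: gate the old retention tag behind the new USE_S3_TAGS flag.
func tagging(useTags bool) *string {
	if !useTags {
		return nil
	}
	value := os.Getenv("RETENTION")
	if value == "" {
		value = "default"
	}
	params := url.Values{}
	params.Add("retention", value)
	tags := params.Encode()
	return &tags
}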

ee/api/.gitignore vendored
View file

@ -227,6 +227,7 @@ Pipfile.lock
/chalicelib/core/sessions.py
/chalicelib/core/sessions_assignments.py
/chalicelib/core/sessions_mobs.py
/chalicelib/core/significance.py
/chalicelib/core/socket_ios.py
/chalicelib/core/sourcemaps.py
/chalicelib/core/sourcemaps_parser.py

View file

@ -11,6 +11,8 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
WORKDIR /work
COPY requirements.txt ./requirements.txt
# Caching the source build
RUN pip install --no-cache-dir --upgrade python3-saml==1.16.0 --no-binary=lxml
RUN pip install --no-cache-dir --upgrade -r requirements.txt
COPY . .

View file

@ -15,16 +15,16 @@ fastapi = "==0.104.1"
gunicorn = "==21.2.0"
python-decouple = "==3.8"
apscheduler = "==3.10.4"
python3-saml = "==1.16.0"
python-multipart = "==0.0.6"
redis = "==5.0.1"
python3-saml = "==1.16.0"
azure-storage-blob = "==12.19.0"
psycopg = {extras = ["binary", "pool"], version = "==3.1.14"}
uvicorn = {extras = ["standard"], version = "==0.23.2"}
pydantic = {extras = ["email"], version = "==2.3.0"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.6"}
psycopg = {extras = ["binary", "pool"], version = "==3.1.12"}
[dev-packages]
[requires]
python_version = "3.11"
python_version = "3.12"

View file

@ -68,7 +68,9 @@ async def lifespan(app: FastAPI):
"application_name": "AIO" + config("APP_NAME", default="PY"),
}
database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection)
database = psycopg_pool.AsyncConnectionPool(kwargs=database, connection_class=ORPYAsyncConnection,
min_size=config("PG_AIO_MINCONN", cast=int, default=1),
max_size=config("PG_AIO_MAXCONN", cast=int, default=5), )
app.state.postgresql = database
# App listening

View file

@ -99,6 +99,7 @@ def __get_sessions_list(project_id, user_id, data: schemas.CardSchema):
def __get_click_map_chart(project_id, user_id, data: schemas.CardClickMap, include_mobs: bool = True):
if len(data.series) == 0:
return None
data.series[0].filter.filters += data.series[0].filter.events
return click_maps.search_short_session(project_id=project_id, user_id=user_id,
data=schemas.ClickMapSessionsSearch(
**data.series[0].filter.model_dump()),
@ -213,10 +214,10 @@ def __merge_metric_with_data(metric: schemas.CardSchema,
if data.series is not None and len(data.series) > 0:
metric.series = data.series
if len(data.filters) > 0:
for s in metric.series:
s.filter.filters += data.filters
metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
# if len(data.filters) > 0:
# for s in metric.series:
# s.filter.filters += data.filters
# metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
return metric
@ -277,11 +278,11 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
def __get_funnel_issues(project_id: int, user_id: int, data: schemas.CardFunnel):
if len(data.series) == 0:
return {"data": []}
return []
data.series[0].filter.startTimestamp = data.startTimestamp
data.series[0].filter.endTimestamp = data.endTimestamp
data = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
return {"data": data}
return data
def __get_path_analysis_issues(project_id: int, user_id: int, data: schemas.CardPathAnalysis):

View file

@ -457,7 +457,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
# pg_sub_query_chart.append("source ='js_exception'")
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
@ -544,7 +545,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
FROM events.errors
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp

View file

@ -744,7 +744,7 @@ def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_n
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}EventType='ERROR'")
ch_sub_query.append(f"{table_name}event_type='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
@ -789,24 +789,25 @@ def __get_basic_constraints_pg(platform=None, time_constraint=True, startTime_ar
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startDate)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startDate)
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)
platform = None
for f in data.filters:
if f.type == schemas.FilterType.platform and len(f.value) > 0:
platform = f.value[0]
ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
ch_sub_query = __get_basic_constraints(platform, type_condition=True)
# ignore platform for errors table
ch_sub_query = __get_basic_constraints(None, type_condition=True)
ch_sub_query.append("source ='js_exception'")
# To ignore Script error
ch_sub_query.append("message!='Script error.'")
error_ids = None
if data.startDate is None:
data.startDate = TimeUTC.now(-7)
if data.endDate is None:
data.endDate = TimeUTC.now(1)
if data.startTimestamp is None:
data.startTimestamp = TimeUTC.now(-7)
if data.endTimestamp is None:
data.endTimestamp = TimeUTC.now(1)
subquery_part = ""
params = {}
@ -995,7 +996,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
value_key=f_k))
with ch_client.ClickHouseClient() as ch:
step_size = __get_step_size(data.startDate, data.endDate, data.density)
step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density)
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
@ -1004,8 +1005,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
order = data.order
params = {
**params,
"startDate": data.startDate,
"endDate": data.endDate,
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project_id,
"userId": user_id,
"step_size": step_size}
@ -1062,7 +1063,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
toUnixTimestamp(MIN(datetime))*1000 AS first_occurrence
FROM {MAIN_EVENTS_TABLE}
WHERE project_id=%(project_id)s
AND EventType='ERROR'
AND event_type='ERROR'
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
@ -1085,8 +1086,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
r["chart"] = list(r["chart"])
for i in range(len(r["chart"])):
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
r["chart"] = metrics.__complete_missing_steps(rows=r["chart"], start_time=data.startDate,
end_time=data.endDate,
r["chart"] = metrics.__complete_missing_steps(rows=r["chart"], start_time=data.startTimestamp,
end_time=data.endTimestamp,
density=data.density, neutral={"count": 0})
return {
'total': total,

View file

@ -251,7 +251,7 @@ def get_by_project_key(project_key):
{"project_key": project_key})
cur.execute(query=query)
row = cur.fetchone()
return row["project_id"] if row else None
return helper.dict_to_camel_case(row)
def get_project_key(project_id):

View file

@ -1,10 +1,10 @@
import ast
import logging
from typing import List, Union
import schemas
from chalicelib.core import events, metadata, projects, performance_event, metrics
from chalicelib.core import events, metadata, projects, performance_event, metrics, sessions_favorite, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
import logging
logger = logging.getLogger(__name__)
SESSION_PROJECTION_COLS_CH = """\
@ -110,6 +110,8 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False,
platform="web"):
if data.bookmarked:
data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project_id, user_id)
full_args, query_part = search_query_parts_ch(data=data, error_status=error_status, errors_only=errors_only,
favorite_only=data.bookmarked, issue=issue, project_id=project_id,
user_id=user_id, platform=platform)
@ -194,7 +196,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
s.{sort} AS sort_key,
map({SESSION_PROJECTION_COLS_CH_MAP}{meta_map}) AS details
{query_part}
LEFT JOIN (SELECT session_id
LEFT JOIN (SELECT DISTINCT session_id
FROM experimental.user_viewed_sessions
WHERE user_id = %(userId)s AND project_id=%(project_id)s
AND _timestamp >= toDateTime(%(startDate)s / 1000)) AS viewed_sessions
@ -354,6 +356,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
density=density))
extra_event = None
extra_deduplication = []
extra_conditions = None
if metric_of == schemas.MetricOfTable.visited_url:
extra_event = f"""SELECT DISTINCT ev.session_id, ev.url_path
FROM {exp_ch_helper.get_main_events_table(data.startTimestamp)} AS ev
@ -362,13 +365,30 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
AND ev.project_id = %(project_id)s
AND ev.event_type = 'LOCATION'"""
extra_deduplication.append("url_path")
extra_conditions = {}
for e in data.events:
if e.type == schemas.EventType.location:
if e.operator not in extra_conditions:
extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
"type": e.type,
"isEvent": True,
"value": [],
"operator": e.operator,
"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:
extra_conditions[e.operator].value.append(v)
extra_conditions = list(extra_conditions.values())
elif metric_of == schemas.MetricOfTable.issues and len(metric_value) > 0:
data.filters.append(schemas.SessionSearchFilterSchema(value=metric_value, type=schemas.FilterType.issue,
operator=schemas.SearchEventOperator._is))
full_args, query_part = search_query_parts_ch(data=data, error_status=None, errors_only=False,
favorite_only=False, issue=None, project_id=project_id,
user_id=None, extra_event=extra_event,
extra_deduplication=extra_deduplication)
extra_deduplication=extra_deduplication,
extra_conditions=extra_conditions)
full_args["step_size"] = step_size
sessions = []
with ch_client.ClickHouseClient() as cur:
@ -521,7 +541,14 @@ def __get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEve
# this function generates the query and return the generated-query with the dict of query arguments
def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_status, errors_only, favorite_only, issue,
project_id, user_id, platform="web", extra_event=None, extra_deduplication=[]):
project_id, user_id, platform="web", extra_event=None, extra_deduplication=[],
extra_conditions=None):
if issue:
data.filters.append(
schemas.SessionSearchFilterSchema(value=[issue['type']],
type=schemas.FilterType.issue.value,
operator='is')
)
ss_constraints = []
full_args = {"project_id": project_id, "startDate": data.startTimestamp, "endDate": data.endTimestamp,
"projectId": project_id, "userId": user_id}
@ -1446,12 +1473,17 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
extra_join = ""
if issue is not None:
extra_join = """
INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id)
WHERE issues.session_id=f.session_id
AND p_issues.type=%(issue_type)s
AND p_issues.context_string=%(issue_contextString)s
AND timestamp >= f.first_event_ts
AND timestamp <= f.last_event_ts) AS issues ON(TRUE)
INNER JOIN (SELECT session_id
FROM experimental.issues
INNER JOIN experimental.events USING (issue_id)
WHERE issues.type = %(issue_type)s
AND issues.context_string = %(issue_contextString)s
AND issues.project_id = %(projectId)s
AND events.project_id = %(projectId)s
AND events.issue_type = %(issue_type)s
AND events.datetime >= toDateTime(%(startDate)s/1000)
AND events.datetime <= toDateTime(%(endDate)s/1000)
) AS issues ON (f.session_id = issues.session_id)
"""
full_args["issue_contextString"] = issue["contextString"]
full_args["issue_type"] = issue["type"]
@ -1476,9 +1508,24 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
if extra_event:
extra_event = f"INNER JOIN ({extra_event}) AS extra_event USING(session_id)"
# extra_join = f"""INNER JOIN {extra_event} AS ev USING(session_id)"""
# extra_constraints.append("ev.timestamp>=%(startDate)s")
# extra_constraints.append("ev.timestamp<=%(endDate)s")
if extra_conditions and len(extra_conditions) > 0:
_extra_or_condition = []
for i, c in enumerate(extra_conditions):
if _isAny_opreator(c.operator):
continue
e_k = f"ec_value{i}"
op = __get_sql_operator(c.operator)
c.value = helper.values_for_operator(value=c.value, op=c.operator)
full_args = {**full_args,
**_multiple_values(c.value, value_key=e_k)}
if c.type == events.EventType.LOCATION.ui_type:
_extra_or_condition.append(
_multiple_conditions(f"extra_event.url_path {op} %({e_k})s",
c.value, value_key=e_k))
else:
logging.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0:
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
else:
extra_event = ""
if errors_only:
@ -1668,3 +1715,29 @@ def check_recording_status(project_id: int) -> dict:
"recordingStatus": row["recording_status"],
"sessionsCount": row["sessions_count"]
}
# TODO: rewrite this function to use ClickHouse
def search_sessions_by_ids(project_id: int, session_ids: list, sort_by: str = 'session_id',
ascending: bool = False) -> dict:
if session_ids is None or len(session_ids) == 0:
return {"total": 0, "sessions": []}
with pg_client.PostgresClient() as cur:
meta_keys = metadata.get(project_id=project_id)
params = {"project_id": project_id, "session_ids": tuple(session_ids)}
order_direction = 'ASC' if ascending else 'DESC'
main_query = cur.mogrify(f"""SELECT {sessions_legacy.SESSION_PROJECTION_BASE_COLS}
{"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
FROM public.sessions AS s
WHERE project_id=%(project_id)s
AND session_id IN %(session_ids)s
ORDER BY {sort_by} {order_direction};""", params)
cur.execute(main_query)
rows = cur.fetchall()
if len(meta_keys) > 0:
for s in rows:
s["metadata"] = {}
for m in meta_keys:
s["metadata"][m["key"]] = s.pop(f'metadata_{m["index"]}')
return {"total": len(rows), "sessions": helper.list_to_camel_case(rows)}

View file

@ -1,8 +1,8 @@
from typing import Optional
import schemas
import schemas
from chalicelib.core import metrics
from chalicelib.core import sessions_exp
from chalicelib.utils import ch_client
@ -188,7 +188,8 @@ def __filter_subquery(project_id: int, filters: Optional[schemas.SessionsSearchP
errors_only=True, favorite_only=None,
issue=None, user_id=None)
params = {**params, **qp_params}
sub_query = f"INNER JOIN {sub_query} USING(session_id)"
# TODO: test if this line impacts other cards beside insights
# sub_query = f"INNER JOIN {sub_query} USING(session_id)"
return params, sub_query
@ -351,7 +352,7 @@ def query_cpu_memory_by_period(project_id, start_time, end_time,
'value': cpu_newvalue,
'oldValue': cpu_oldvalue,
'change': 100 * (
cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
if mem_oldvalue is not None or mem_newvalue is not None:
output.append({'category': schemas.InsightCategories.resources,
@ -359,14 +360,11 @@ def query_cpu_memory_by_period(project_id, start_time, end_time,
'value': mem_newvalue,
'oldValue': mem_oldvalue,
'change': 100 * (
mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})
return output
from chalicelib.core import sessions_exp
def query_click_rage_by_period(project_id, start_time, end_time,
filters: Optional[schemas.SessionsSearchPayloadSchema]):
params = {

View file

@ -128,16 +128,15 @@ def edit(tenant_id, user_id, project_id, note_id, data: schemas.SessionUpdateNot
return row
def delete(tenant_id, user_id, project_id, note_id):
def delete(project_id, note_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(""" UPDATE public.sessions_notes
SET deleted_at = timezone('utc'::text, now())
WHERE note_id = %(note_id)s
AND project_id = %(project_id)s
AND user_id = %(user_id)s
AND deleted_at ISNULL;""",
{"project_id": project_id, "user_id": user_id, "note_id": note_id})
{"project_id": project_id, "note_id": note_id})
)
return {"data": {"state": "success"}}

View file

@ -1,619 +0,0 @@
__author__ = "AZNAUROV David"
__maintainer__ = "KRAIEM Taha Yassine"
import logging
from decouple import config
import schemas
from chalicelib.core import events, metadata
from chalicelib.utils import sql_helper as sh
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
from chalicelib.core import sessions_legacy as sessions
else:
from chalicelib.core import sessions
"""
todo: remove LIMIT from the query
"""
from typing import List
import math
import warnings
from collections import defaultdict
from psycopg2.extras import RealDictRow
from chalicelib.utils import pg_client, helper
logger = logging.getLogger(__name__)
SIGNIFICANCE_THRSH = 0.4
# Taha: the value 24 was estimated in v1.15
T_VALUES = {1: 12.706, 2: 4.303, 3: 3.182, 4: 2.776, 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306, 9: 2.262, 10: 2.228,
11: 2.201, 12: 2.179, 13: 2.160, 14: 2.145, 15: 2.13, 16: 2.120, 17: 2.110, 18: 2.101, 19: 2.093, 20: 2.086,
21: 2.080, 22: 2.074, 23: 2.069, 24: 2.067, 25: 2.064, 26: 2.060, 27: 2.056, 28: 2.052, 29: 2.045,
30: 2.042}
def get_stages_and_events(filter_d: schemas.CardSeriesFilterSchema, project_id) -> List[RealDictRow]:
"""
Add minimal timestamp
:param filter_d: dict contains events&filters&...
:return:
"""
stages: [dict] = filter_d.events
filters: [dict] = filter_d.filters
filter_issues = []
# TODO: enable this if needed by an endpoint
# filter_issues = filter_d.get("issueTypes")
# if filter_issues is None or len(filter_issues) == 0:
# filter_issues = []
stage_constraints = ["main.timestamp <= %(endTimestamp)s"]
first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s",
"s.start_ts <= %(endTimestamp)s"]
filter_extra_from = []
n_stages_query = []
values = {}
if len(filters) > 0:
meta_keys = None
for i, f in enumerate(filters):
if len(f.value) == 0:
continue
f.value = helper.values_for_operator(value=f.value, op=f.operator)
# filter_args = _multiple_values(f["value"])
op = sh.get_sql_operator(f.operator)
filter_type = f.type
# values[f_k] = sessions.__get_sql_value_multiple(f["value"])
f_k = f"f_value{i}"
values = {**values,
**sh.multi_values(helper.values_for_operator(value=f.value, op=f.operator),
value_key=f_k)}
if filter_type == schemas.FilterType.user_browser:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f.value) > 0 and f.value[0] is not None:
first_stage_extra_constraints.append(f's.duration >= %(minDuration)s')
values["minDuration"] = f.value[0]
if len(f["value"]) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
first_stage_extra_constraints.append('s.duration <= %(maxDuration)s')
values["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
# events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)"
filter_extra_from = [f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"]
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f"p.base_referrer {op} %({f_k})s", f.value, value_key=f_k))
elif filter_type == events.EventType.METADATA.ui_type:
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
# op = sessions.__get_sql_operator(f["operator"])
if f.source in meta_keys.keys():
first_stage_extra_constraints.append(
sh.multi_conditions(
f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
i = -1
for s in stages:
if s.operator is None:
s.operator = schemas.SearchEventOperator._is
if not isinstance(s.value, list):
s.value = [s.value]
is_any = sh.isAny_opreator(s.operator)
if not is_any and isinstance(s.value, list) and len(s.value) == 0:
continue
i += 1
if i == 0:
extra_from = filter_extra_from + ["INNER JOIN public.sessions AS s USING (session_id)"]
else:
extra_from = []
op = sh.get_sql_operator(s.operator)
# event_type = s["type"].upper()
event_type = s.type
if event_type == events.EventType.CLICK.ui_type:
next_table = events.EventType.CLICK.table
next_col_name = events.EventType.CLICK.column
elif event_type == events.EventType.INPUT.ui_type:
next_table = events.EventType.INPUT.table
next_col_name = events.EventType.INPUT.column
elif event_type == events.EventType.LOCATION.ui_type:
next_table = events.EventType.LOCATION.table
next_col_name = events.EventType.LOCATION.column
elif event_type == events.EventType.CUSTOM.ui_type:
next_table = events.EventType.CUSTOM.table
next_col_name = events.EventType.CUSTOM.column
# IOS --------------
elif event_type == events.EventType.CLICK_IOS.ui_type:
next_table = events.EventType.CLICK_IOS.table
next_col_name = events.EventType.CLICK_IOS.column
elif event_type == events.EventType.INPUT_IOS.ui_type:
next_table = events.EventType.INPUT_IOS.table
next_col_name = events.EventType.INPUT_IOS.column
elif event_type == events.EventType.VIEW_IOS.ui_type:
next_table = events.EventType.VIEW_IOS.table
next_col_name = events.EventType.VIEW_IOS.column
elif event_type == events.EventType.CUSTOM_IOS.ui_type:
next_table = events.EventType.CUSTOM_IOS.table
next_col_name = events.EventType.CUSTOM_IOS.column
else:
logging.warning(f"=================UNDEFINED:{event_type}")
continue
values = {**values, **sh.multi_values(helper.values_for_operator(value=s.value, op=s.operator),
value_key=f"value{i + 1}")}
if sh.is_negation_operator(s.operator) and i > 0:
op = sh.reverse_sql_operator(op)
main_condition = "left_not.session_id ISNULL"
extra_from.append(f"""LEFT JOIN LATERAL (SELECT session_id
FROM {next_table} AS s_main
WHERE
{sh.multi_conditions(f"s_main.{next_col_name} {op} %(value{i + 1})s",
values=s.value, value_key=f"value{i + 1}")}
AND s_main.timestamp >= T{i}.stage{i}_timestamp
AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""")
else:
if is_any:
main_condition = "TRUE"
else:
main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
values=s.value, value_key=f"value{i + 1}")
n_stages_query.append(f"""
(SELECT main.session_id,
{"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
FROM {next_table} AS main {" ".join(extra_from)}
WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
{f"AND main.session_id=T1.session_id" if i > 0 else ""}
AND {main_condition}
{(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
{(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
GROUP BY main.session_id)
AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
n_stages = len(n_stages_query)
if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
n_stages_query += ") AS stages_t"
n_stages_query = f"""
SELECT stages_and_issues_t.*, sessions.user_uuid
FROM (
SELECT * FROM (
SELECT T1.session_id, {",".join([f"stage{i + 1}_timestamp" for i in range(n_stages)])}
FROM {n_stages_query}
LEFT JOIN LATERAL
( SELECT ISS.type as issue_type,
ISE.timestamp AS issue_timestamp,
COALESCE(ISS.context_string,'') as issue_context,
ISS.issue_id as issue_id
FROM events_common.issues AS ISE INNER JOIN issues AS ISS USING (issue_id)
WHERE ISE.timestamp >= stages_t.stage1_timestamp
AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
AND ISS.project_id=%(project_id)s
AND ISE.session_id = stages_t.session_id
AND ISS.type!='custom' -- ignore custom issues because they are massive
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
LIMIT 10 -- remove the limit to get exact stats
) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
# LIMIT 10000
params = {"project_id": project_id, "startTimestamp": filter_d.startTimestamp,
"endTimestamp": filter_d.endTimestamp,
"issueTypes": tuple(filter_issues), **values}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(n_stages_query, params)
logging.debug("---------------------------------------------------")
logging.debug(query)
logging.debug("---------------------------------------------------")
try:
cur.execute(query)
rows = cur.fetchall()
except Exception as err:
logging.warning("--------- FUNNEL SEARCH QUERY EXCEPTION -----------")
logging.warning(query.decode('UTF-8'))
logging.warning("--------- PAYLOAD -----------")
logging.warning(filter_d.model_dump_json())
logging.warning("--------------------")
raise err
return rows
def pearson_corr(x: list, y: list):
n = len(x)
if n != len(y):
raise ValueError(f'x and y must have the same length. Got {len(x)} and {len(y)} instead')
if n < 2:
warnings.warn(f'x and y must have length at least 2. Got {n} instead')
return None, None, False
# If an input is constant, the correlation coefficient is not defined.
if all(t == x[0] for t in x) or all(t == y[0] for t in y):
warnings.warn("An input array is constant; the correlation coefficent is not defined.")
return None, None, False
if n == 2:
return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True
xmean = sum(x) / len(x)
ymean = sum(y) / len(y)
xm = [el - xmean for el in x]
ym = [el - ymean for el in y]
normxm = math.sqrt((sum([xm[i] * xm[i] for i in range(len(xm))])))
normym = math.sqrt((sum([ym[i] * ym[i] for i in range(len(ym))])))
threshold = 1e-8
if normxm < threshold * abs(xmean) or normym < threshold * abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn("An input array is constant; the correlation coefficent is not defined.")
r = sum(
i[0] * i[1] for i in zip([xm[i] / normxm for i in range(len(xm))], [ym[i] / normym for i in range(len(ym))]))
# Presumably, if abs(r) > 1, then it is only some small artifact of floating point arithmetic.
# However, if r < 0, we don't care, as our problem is to find only positive correlations
r = max(min(r, 1.0), 0.0)
# approximated confidence
if n < 31:
t_c = T_VALUES[n]
elif n < 50:
t_c = 2.02
else:
t_c = 2
if r >= 0.999:
confidence = 1
else:
confidence = r * math.sqrt(n - 2) / math.sqrt(1 - r ** 2)
if confidence > SIGNIFICANCE_THRSH:
return r, confidence, True
else:
return r, confidence, False
# def tuple_or(t: tuple):
# x = 0
# for el in t:
# x |= el # | is for bitwise OR
# return x
#
# The following function is correct optimization of the previous function because t is a list of 0,1
def tuple_or(t: tuple):
for el in t:
if el > 0:
return 1
return 0
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
transitions ::: if transited from the first stage to the last - 1
else - 0
errors ::: a dictionary WHERE the keys are all unique issues (currently context-wise)
the values are lists
if an issue happened between the first stage to the last - 1
else - 0
For a small task of calculating a total drop due to issues,
we need to disregard the issue type when creating the `errors`-like array.
The `all_errors` array can be obtained by logical OR statement applied to all errors by issue
The `transitions` array stays the same
"""
transitions = []
n_sess_affected = 0
errors = {}
for row in rows:
t = 0
first_ts = row[f'stage{first_stage}_timestamp']
last_ts = row[f'stage{last_stage}_timestamp']
if first_ts is None:
continue
elif last_ts is not None:
t = 1
transitions.append(t)
ic_present = False
for error_id in all_issues:
if error_id not in errors:
errors[error_id] = []
ic = 0
row_issue_id = row['issue_id']
if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
if error_id == row_issue_id:
ic = 1
ic_present = True
errors[error_id].append(ic)
if ic_present and t:
n_sess_affected += 1
all_errors = [tuple_or(t) for t in zip(*errors.values())]
return transitions, errors, all_errors, n_sess_affected
def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
:param rows:
:param first_stage:
:param last_stage:
:return:
"""
affected_users = defaultdict(lambda: set())
affected_sessions = defaultdict(lambda: set())
all_issues = {}
n_affected_users_dict = defaultdict(lambda: None)
n_affected_sessions_dict = defaultdict(lambda: None)
n_issues_dict = defaultdict(lambda: 0)
issues_by_session = defaultdict(lambda: 0)
for row in rows:
# check that the session has reached the first stage of subfunnel:
if row[f'stage{first_stage}_timestamp'] is None:
continue
iss = row['issue_type']
iss_ts = row['issue_timestamp']
# check that the issue exists and belongs to subfunnel:
if iss is not None and (row[f'stage{last_stage}_timestamp'] is None or
(row[f'stage{first_stage}_timestamp'] < iss_ts < row[f'stage{last_stage}_timestamp'])):
if row["issue_id"] not in all_issues:
all_issues[row["issue_id"]] = {"context": row['issue_context'], "issue_type": row["issue_type"]}
n_issues_dict[row["issue_id"]] += 1
if row['user_uuid'] is not None:
affected_users[row["issue_id"]].add(row['user_uuid'])
affected_sessions[row["issue_id"]].add(row['session_id'])
issues_by_session[row[f'session_id']] += 1
if len(affected_users) > 0:
n_affected_users_dict.update({
iss: len(affected_users[iss]) for iss in affected_users
})
if len(affected_sessions) > 0:
n_affected_sessions_dict.update({
iss: len(affected_sessions[iss]) for iss in affected_sessions
})
return all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
session_counts[i].add(row[f"session_id"])
session_counts = {i: len(session_counts[i]) for i in session_counts}
return session_counts
def count_users(rows, n_stages):
users_in_stages = {i: set() for i in range(1, n_stages + 1)}
for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
users_in_stages[i].add(row["user_uuid"])
users_count = {i: len(users_in_stages[i]) for i in range(1, n_stages + 1)}
return users_count
def get_stages(stages, rows):
n_stages = len(stages)
session_counts = count_sessions(rows, n_stages)
users_counts = count_users(rows, n_stages)
stages_list = []
for i, stage in enumerate(stages):
drop = None
if i != 0:
if session_counts[i] == 0:
drop = 0
elif session_counts[i] > 0:
drop = int(100 * (session_counts[i] - session_counts[i + 1]) / session_counts[i])
stages_list.append(
{"value": stage.value,
"type": stage.type,
"operator": stage.operator,
"sessionsCount": session_counts[i + 1],
"drop_pct": drop,
"usersCount": users_counts[i + 1],
"dropDueToIssues": 0
}
)
return stages_list
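The drop percentage above is simply the relative decrease between consecutive stage counts; a quick worked example with hypothetical counts:
session_counts = {1: 200, 2: 120, 3: 30}    # hypothetical output of count_sessions()
# drop_pct for a stage = int(100 * (previous_count - current_count) / previous_count)
drop_stage2 = int(100 * (session_counts[1] - session_counts[2]) / session_counts[1])   # 40
drop_stage3 = int(100 * (session_counts[2] - session_counts[3]) / session_counts[2])   # 75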
def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False):
"""
:param stages:
:param rows:
:param first_stage: if this is a sub-funnel of the initial funnel, the number of its first stage (starting from 1)
:param last_stage: if this is a sub-funnel of the initial funnel, the number of its last stage (starting from 1)
:return: (n_critical_issues, issues_dict, total_drop_due_to_issues), or only total_drop_due_to_issues when drop_only=True
"""
n_stages = len(stages)
if first_stage is None:
first_stage = 1
if last_stage is None:
last_stage = n_stages
if last_stage > n_stages:
logging.debug(
"The number of the last stage provided is greater than the number of stages. Using n_stages instead")
last_stage = n_stages
n_critical_issues = 0
issues_dict = {"significant": [],
"insignificant": []}
session_counts = count_sessions(rows, n_stages)
drop = session_counts[first_stage] - session_counts[last_stage]
all_issues, n_issues_dict, affected_users_dict, affected_sessions = get_affected_users_for_all_issues(
rows, first_stage, last_stage)
transitions, errors, all_errors, n_sess_affected = get_transitions_and_issues_of_each_type(rows,
all_issues,
first_stage, last_stage)
del rows
if any(all_errors):
total_drop_corr, conf, is_sign = pearson_corr(transitions, all_errors)
if total_drop_corr is not None and drop is not None:
total_drop_due_to_issues = int(total_drop_corr * n_sess_affected)
else:
total_drop_due_to_issues = 0
else:
total_drop_due_to_issues = 0
if drop_only:
return total_drop_due_to_issues
for issue_id in all_issues:
if not any(errors[issue_id]):
continue
r, confidence, is_sign = pearson_corr(transitions, errors[issue_id])
if r is not None and drop is not None and is_sign:
lost_conversions = int(r * affected_sessions[issue_id])
else:
lost_conversions = None
if r is None:
r = 0
issues_dict['significant' if is_sign else 'insignificant'].append({
"type": all_issues[issue_id]["issue_type"],
"title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
"affected_sessions": affected_sessions[issue_id],
"unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
"lost_conversions": lost_conversions,
"affected_users": affected_users_dict[issue_id],
"conversion_impact": round(r * 100),
"context_string": all_issues[issue_id]["context"],
"issue_id": issue_id
})
if is_sign:
n_critical_issues += n_issues_dict[issue_id]
# To limit the number of returned issues to the frontend
issues_dict["significant"] = issues_dict["significant"][:20]
issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues
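The per-issue figures above are all derived from the correlation coefficient; a small worked example with hypothetical numbers:
r = 0.5                       # positive correlation returned by pearson_corr() for one issue
affected = 40                 # affected_sessions[issue_id]
stage1_sessions = 300         # session_counts[1]
lost_conversions = int(r * affected)                  # 20 sessions estimated lost to this issue
conversion_impact = round(r * 100)                    # 50 (%), as reported to the frontend
unaffected_sessions = stage1_sessions - affected      # 260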
def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
output = []
stages = filter_d.events
# TODO: handle 1 stage alone
if len(stages) == 0:
logging.debug("no stages found")
return output, 0
elif len(stages) == 1:
# TODO: count sessions, and users for single stage
output = [{
"type": stages[0].type,
"value": stages[0].value,
"dropPercentage": None,
"operator": stages[0].operator,
"sessionsCount": 0,
"dropPct": 0,
"usersCount": 0,
"dropDueToIssues": 0
}]
# original
# counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# first change
# counts = sessions.search_sessions(data=schemas.FlatSessionsSearchPayloadSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# last change
counts = sessions.search_sessions(data=schemas.SessionsSearchPayloadSchema.model_validate(filter_d),
project_id=project_id, user_id=None, count_only=True)
output[0]["sessionsCount"] = counts["countSessions"]
output[0]["usersCount"] = counts["countUsers"]
return output, 0
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
return get_stages(stages, []), 0
# Obtain the first part of the output
stages_list = get_stages(stages, rows)
# Obtain the second part of the output
total_drop_due_to_issues = get_issues(stages, rows,
first_stage=1,
last_stage=len(filter_d.events),
drop_only=True)
return stages_list, total_drop_due_to_issues
def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_stage=None, last_stage=None):
output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
stages = filter_d.events
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
return output
# Obtain the second part of the output
n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows, first_stage=first_stage,
last_stage=last_stage)
output['total_drop_due_to_issues'] = total_drop_due_to_issues
# output['critical_issues_count'] = n_critical_issues
output = {**output, **issues_dict}
return output
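For reference, the returned dictionary has this shape (values below are hypothetical; critical_issues_count stays at 0 because its update is commented out above):
example_output = {
    "total_drop_due_to_issues": 20,
    "critical_issues_count": 0,
    "significant": [{
        "type": "click_rage", "title": "Click Rage",
        "affected_sessions": 40, "unaffected_sessions": 260,
        "lost_conversions": 20, "affected_users": 35,
        "conversion_impact": 50, "context_string": "checkout button",
        "issue_id": "iss-1",
    }],
    "insignificant": [],
}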


@ -1,618 +1,2 @@
__maintainer__ = "KRAIEM Taha Yassine"
import logging
from decouple import config
import schemas
from chalicelib.core import events, metadata
from chalicelib.utils import sql_helper as sh
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
from chalicelib.core import sessions_legacy as sessions
else:
from chalicelib.core import sessions
"""
todo: remove LIMIT from the query
"""
from typing import List
import math
import warnings
from collections import defaultdict
from psycopg2.extras import RealDictRow
from chalicelib.utils import pg_client, helper
logger = logging.getLogger(__name__)
SIGNIFICANCE_THRSH = 0.4
# Taha: the value 24 was estimated in v1.15
T_VALUES = {1: 12.706, 2: 4.303, 3: 3.182, 4: 2.776, 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306, 9: 2.262, 10: 2.228,
11: 2.201, 12: 2.179, 13: 2.160, 14: 2.145, 15: 2.13, 16: 2.120, 17: 2.110, 18: 2.101, 19: 2.093, 20: 2.086,
21: 2.080, 22: 2.074, 23: 2.069, 24: 2.067, 25: 2.064, 26: 2.060, 27: 2.056, 28: 2.052, 29: 2.045,
30: 2.042}
def get_stages_and_events(filter_d: schemas.CardSeriesFilterSchema, project_id) -> List[RealDictRow]:
"""
Add minimal timestamp
:param filter_d: dict contains events&filters&...
:return:
"""
stages: [dict] = filter_d.events
filters: [dict] = filter_d.filters
filter_issues = []
# TODO: enable this if needed by an endpoint
# filter_issues = filter_d.get("issueTypes")
# if filter_issues is None or len(filter_issues) == 0:
# filter_issues = []
stage_constraints = ["main.timestamp <= %(endTimestamp)s"]
first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s",
"s.start_ts <= %(endTimestamp)s"]
filter_extra_from = []
n_stages_query = []
values = {}
if len(filters) > 0:
meta_keys = None
for i, f in enumerate(filters):
if len(f.value) == 0:
continue
f.value = helper.values_for_operator(value=f.value, op=f.operator)
# filter_args = _multiple_values(f["value"])
op = sh.get_sql_operator(f.operator)
filter_type = f.type
# values[f_k] = sessions.__get_sql_value_multiple(f["value"])
f_k = f"f_value{i}"
values = {**values,
**sh.multi_values(helper.values_for_operator(value=f.value, op=f.operator),
value_key=f_k)}
if filter_type == schemas.FilterType.user_browser:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f.value) > 0 and f.value[0] is not None:
first_stage_extra_constraints.append(f's.duration >= %(minDuration)s')
values["minDuration"] = f.value[0]
if len(f["value"]) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
first_stage_extra_constraints.append('s.duration <= %(maxDuration)s')
values["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
# events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)"
filter_extra_from = [f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"]
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f"p.base_referrer {op} %({f_k})s", f.value, value_key=f_k))
elif filter_type == events.EventType.METADATA.ui_type:
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
# op = sessions.__get_sql_operator(f["operator"])
if f.source in meta_keys.keys():
first_stage_extra_constraints.append(
sh.multi_conditions(
f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
i = -1
for s in stages:
if s.operator is None:
s.operator = schemas.SearchEventOperator._is
if not isinstance(s.value, list):
s.value = [s.value]
is_any = sh.isAny_opreator(s.operator)
if not is_any and isinstance(s.value, list) and len(s.value) == 0:
continue
i += 1
if i == 0:
extra_from = filter_extra_from + ["INNER JOIN public.sessions AS s USING (session_id)"]
else:
extra_from = []
op = sh.get_sql_operator(s.operator)
# event_type = s["type"].upper()
event_type = s.type
if event_type == events.EventType.CLICK.ui_type:
next_table = events.EventType.CLICK.table
next_col_name = events.EventType.CLICK.column
elif event_type == events.EventType.INPUT.ui_type:
next_table = events.EventType.INPUT.table
next_col_name = events.EventType.INPUT.column
elif event_type == events.EventType.LOCATION.ui_type:
next_table = events.EventType.LOCATION.table
next_col_name = events.EventType.LOCATION.column
elif event_type == events.EventType.CUSTOM.ui_type:
next_table = events.EventType.CUSTOM.table
next_col_name = events.EventType.CUSTOM.column
# IOS --------------
elif event_type == events.EventType.CLICK_IOS.ui_type:
next_table = events.EventType.CLICK_IOS.table
next_col_name = events.EventType.CLICK_IOS.column
elif event_type == events.EventType.INPUT_IOS.ui_type:
next_table = events.EventType.INPUT_IOS.table
next_col_name = events.EventType.INPUT_IOS.column
elif event_type == events.EventType.VIEW_IOS.ui_type:
next_table = events.EventType.VIEW_IOS.table
next_col_name = events.EventType.VIEW_IOS.column
elif event_type == events.EventType.CUSTOM_IOS.ui_type:
next_table = events.EventType.CUSTOM_IOS.table
next_col_name = events.EventType.CUSTOM_IOS.column
else:
logging.warning(f"=================UNDEFINED:{event_type}")
continue
values = {**values, **sh.multi_values(helper.values_for_operator(value=s.value, op=s.operator),
value_key=f"value{i + 1}")}
if sh.is_negation_operator(s.operator) and i > 0:
op = sh.reverse_sql_operator(op)
main_condition = "left_not.session_id ISNULL"
extra_from.append(f"""LEFT JOIN LATERAL (SELECT session_id
FROM {next_table} AS s_main
WHERE
{sh.multi_conditions(f"s_main.{next_col_name} {op} %(value{i + 1})s",
values=s.value, value_key=f"value{i + 1}")}
AND s_main.timestamp >= T{i}.stage{i}_timestamp
AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""")
else:
if is_any:
main_condition = "TRUE"
else:
main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
values=s.value, value_key=f"value{i + 1}")
n_stages_query.append(f"""
(SELECT main.session_id,
{"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
FROM {next_table} AS main {" ".join(extra_from)}
WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
{f"AND main.session_id=T1.session_id" if i > 0 else ""}
AND {main_condition}
{(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
{(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
GROUP BY main.session_id)
AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
n_stages = len(n_stages_query)
if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
n_stages_query += ") AS stages_t"
n_stages_query = f"""
SELECT stages_and_issues_t.*, sessions.user_uuid
FROM (
SELECT * FROM (
SELECT T1.session_id, {",".join([f"stage{i + 1}_timestamp" for i in range(n_stages)])}
FROM {n_stages_query}
LEFT JOIN LATERAL
( SELECT ISS.type as issue_type,
ISE.timestamp AS issue_timestamp,
COALESCE(ISS.context_string,'') as issue_context,
ISS.issue_id as issue_id
FROM events_common.issues AS ISE INNER JOIN issues AS ISS USING (issue_id)
WHERE ISE.timestamp >= stages_t.stage1_timestamp
AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
AND ISS.project_id=%(project_id)s
AND ISE.session_id = stages_t.session_id
AND ISS.type!='custom' -- ignore custom issues because they are massive
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
LIMIT 10 -- remove the limit to get exact stats
) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
# LIMIT 10000
params = {"project_id": project_id, "startTimestamp": filter_d.startTimestamp,
"endTimestamp": filter_d.endTimestamp,
"issueTypes": tuple(filter_issues), **values}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(n_stages_query, params)
logging.debug("---------------------------------------------------")
logging.debug(query)
logging.debug("---------------------------------------------------")
try:
cur.execute(query)
rows = cur.fetchall()
except Exception as err:
logging.warning("--------- FUNNEL SEARCH QUERY EXCEPTION -----------")
logging.warning(query.decode('UTF-8'))
logging.warning("--------- PAYLOAD -----------")
logging.warning(filter_d.model_dump_json())
logging.warning("--------------------")
raise err
return rows
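To make the string assembly above easier to follow, here is a rough outline of the SQL generated for a hypothetical two-stage funnel; the parts in angle brackets are placeholders, and the exact predicates come from sh.multi_conditions / sh.multi_values and the selected event tables:
TWO_STAGE_QUERY_SKETCH = """
SELECT stages_and_issues_t.*, sessions.user_uuid
FROM (
    SELECT * FROM (
        SELECT T1.session_id, stage1_timestamp, stage2_timestamp
        FROM (SELECT main.session_id, MIN(main.timestamp) AS stage1_timestamp
              FROM <stage-1 events table> AS main
                   INNER JOIN public.sessions AS s USING (session_id)
              WHERE main.timestamp >= %(startTimestamp)s
                AND <stage-1 value predicate>
                AND main.timestamp <= %(endTimestamp)s
                AND s.project_id = %(project_id)s
                AND s.start_ts >= %(startTimestamp)s
                AND s.start_ts <= %(endTimestamp)s
              GROUP BY main.session_id) AS T1
        LEFT JOIN LATERAL
             (SELECT main.session_id, MAX(main.timestamp) AS stage2_timestamp
              FROM <stage-2 events table> AS main
              WHERE main.timestamp >= T1.stage1_timestamp
                AND main.session_id = T1.session_id
                AND <stage-2 value predicate>
                AND main.timestamp <= %(endTimestamp)s
              GROUP BY main.session_id) AS T2 ON (TRUE)
    ) AS stages_t
    LEFT JOIN LATERAL (<issues sub-query shown above>) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING (session_id);
"""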
def pearson_corr(x: list, y: list):
n = len(x)
if n != len(y):
raise ValueError(f'x and y must have the same length. Got {len(x)} and {len(y)} instead')
if n < 2:
warnings.warn(f'x and y must have length at least 2. Got {n} instead')
return None, None, False
# If an input is constant, the correlation coefficient is not defined.
if all(t == x[0] for t in x) or all(t == y[0] for t in y):
warnings.warn("An input array is constant; the correlation coefficent is not defined.")
return None, None, False
if n == 2:
return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True
xmean = sum(x) / len(x)
ymean = sum(y) / len(y)
xm = [el - xmean for el in x]
ym = [el - ymean for el in y]
normxm = math.sqrt((sum([xm[i] * xm[i] for i in range(len(xm))])))
normym = math.sqrt((sum([ym[i] * ym[i] for i in range(len(ym))])))
threshold = 1e-8
if normxm < threshold * abs(xmean) or normym < threshold * abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn("An input array is constant; the correlation coefficent is not defined.")
r = sum(
i[0] * i[1] for i in zip([xm[i] / normxm for i in range(len(xm))], [ym[i] / normym for i in range(len(ym))]))
# Presumably, if abs(r) > 1, then it is only some small artifact of floating point arithmetic.
# However, if r < 0, we don't care, as our problem is to find only positive correlations
r = max(min(r, 1.0), 0.0)
# approximated confidence
if n < 31:
t_c = T_VALUES[n]
elif n < 50:
t_c = 2.02
else:
t_c = 2
if r >= 0.999:
confidence = 1
else:
confidence = r * math.sqrt(n - 2) / math.sqrt(1 - r ** 2)
if confidence > SIGNIFICANCE_THRSH:
return r, confidence, True
else:
return r, confidence, False
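A quick numeric check of the confidence estimate above (t = r * sqrt(n - 2) / sqrt(1 - r^2)), compared against the module's SIGNIFICANCE_THRSH, with hypothetical inputs:
import math                          # already imported at the top of this module
r, n = 0.6, 10                       # hypothetical correlation and sample size
confidence = r * math.sqrt(n - 2) / math.sqrt(1 - r ** 2)
# confidence ~= 2.12 > SIGNIFICANCE_THRSH (0.4)  ->  the correlation is reported as significant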
# def tuple_or(t: tuple):
# x = 0
# for el in t:
# x |= el # | is for bitwise OR
# return x
#
# The following function is a correct optimization of the previous one, because t is a list of 0/1 values
def tuple_or(t: tuple):
for el in t:
if el > 0:
return 1
return 0
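For illustration, how tuple_or folds the per-issue lists into all_errors (hypothetical issue ids):
errors = {"iss-1": [1, 0, 0],
          "iss-2": [0, 0, 1]}
all_errors = [tuple_or(t) for t in zip(*errors.values())]
# all_errors == [1, 0, 1]   ->  1 wherever any issue occurred for that session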
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
transitions ::: if transited from the first stage to the last - 1
else - 0
errors ::: a dictionary WHERE the keys are all unique issues (currently context-wise)
the values are lists
if an issue happened between the first stage to the last - 1
else - 0
For a small task of calculating a total drop due to issues,
we need to disregard the issue type when creating the `errors`-like array.
The `all_errors` array can be obtained by logical OR statement applied to all errors by issue
The `transitions` array stays the same
"""
transitions = []
n_sess_affected = 0
errors = {}
for row in rows:
t = 0
first_ts = row[f'stage{first_stage}_timestamp']
last_ts = row[f'stage{last_stage}_timestamp']
if first_ts is None:
continue
elif last_ts is not None:
t = 1
transitions.append(t)
ic_present = False
for error_id in all_issues:
if error_id not in errors:
errors[error_id] = []
ic = 0
row_issue_id = row['issue_id']
if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
if error_id == row_issue_id:
ic = 1
ic_present = True
errors[error_id].append(ic)
if ic_present and t:
n_sess_affected += 1
all_errors = [tuple_or(t) for t in zip(*errors.values())]
return transitions, errors, all_errors, n_sess_affected
def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
Aggregate, per issue, the number of occurrences and the sets of affected users/sessions
inside the sub-funnel [first_stage, last_stage].
:param rows: rows returned by get_stages_and_events (one per session/issue pair)
:param first_stage: number of the first stage of the sub-funnel (starting from 1)
:param last_stage: number of the last stage of the sub-funnel (starting from 1)
:return: (all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict)
"""
affected_users = defaultdict(lambda: set())
affected_sessions = defaultdict(lambda: set())
all_issues = {}
n_affected_users_dict = defaultdict(lambda: None)
n_affected_sessions_dict = defaultdict(lambda: None)
n_issues_dict = defaultdict(lambda: 0)
issues_by_session = defaultdict(lambda: 0)
for row in rows:
# check that the session has reached the first stage of subfunnel:
if row[f'stage{first_stage}_timestamp'] is None:
continue
iss = row['issue_type']
iss_ts = row['issue_timestamp']
# check that the issue exists and belongs to subfunnel:
if iss is not None and (row[f'stage{last_stage}_timestamp'] is None or
(row[f'stage{first_stage}_timestamp'] < iss_ts < row[f'stage{last_stage}_timestamp'])):
if row["issue_id"] not in all_issues:
all_issues[row["issue_id"]] = {"context": row['issue_context'], "issue_type": row["issue_type"]}
n_issues_dict[row["issue_id"]] += 1
if row['user_uuid'] is not None:
affected_users[row["issue_id"]].add(row['user_uuid'])
affected_sessions[row["issue_id"]].add(row['session_id'])
issues_by_session[row[f'session_id']] += 1
if len(affected_users) > 0:
n_affected_users_dict.update({
iss: len(affected_users[iss]) for iss in affected_users
})
if len(affected_sessions) > 0:
n_affected_sessions_dict.update({
iss: len(affected_sessions[iss]) for iss in affected_sessions
})
return all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
session_counts[i].add(row[f"session_id"])
session_counts = {i: len(session_counts[i]) for i in session_counts}
return session_counts
def count_users(rows, n_stages):
users_in_stages = {i: set() for i in range(1, n_stages + 1)}
for row in rows:
for i in range(1, n_stages + 1):
if row[f"stage{i}_timestamp"] is not None:
users_in_stages[i].add(row["user_uuid"])
users_count = {i: len(users_in_stages[i]) for i in range(1, n_stages + 1)}
return users_count
def get_stages(stages, rows):
n_stages = len(stages)
session_counts = count_sessions(rows, n_stages)
users_counts = count_users(rows, n_stages)
stages_list = []
for i, stage in enumerate(stages):
drop = None
if i != 0:
if session_counts[i] == 0:
drop = 0
elif session_counts[i] > 0:
drop = int(100 * (session_counts[i] - session_counts[i + 1]) / session_counts[i])
stages_list.append(
{"value": stage.value,
"type": stage.type,
"operator": stage.operator,
"sessionsCount": session_counts[i + 1],
"drop_pct": drop,
"usersCount": users_counts[i + 1],
"dropDueToIssues": 0
}
)
return stages_list
def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False):
"""
:param stages:
:param rows:
:param first_stage: if this is a sub-funnel of the initial funnel, the number of its first stage (starting from 1)
:param last_stage: if this is a sub-funnel of the initial funnel, the number of its last stage (starting from 1)
:return: (n_critical_issues, issues_dict, total_drop_due_to_issues), or only total_drop_due_to_issues when drop_only=True
"""
n_stages = len(stages)
if first_stage is None:
first_stage = 1
if last_stage is None:
last_stage = n_stages
if last_stage > n_stages:
logging.debug(
"The number of the last stage provided is greater than the number of stages. Using n_stages instead")
last_stage = n_stages
n_critical_issues = 0
issues_dict = {"significant": [],
"insignificant": []}
session_counts = count_sessions(rows, n_stages)
drop = session_counts[first_stage] - session_counts[last_stage]
all_issues, n_issues_dict, affected_users_dict, affected_sessions = get_affected_users_for_all_issues(
rows, first_stage, last_stage)
transitions, errors, all_errors, n_sess_affected = get_transitions_and_issues_of_each_type(rows,
all_issues,
first_stage, last_stage)
del rows
if any(all_errors):
total_drop_corr, conf, is_sign = pearson_corr(transitions, all_errors)
if total_drop_corr is not None and drop is not None:
total_drop_due_to_issues = int(total_drop_corr * n_sess_affected)
else:
total_drop_due_to_issues = 0
else:
total_drop_due_to_issues = 0
if drop_only:
return total_drop_due_to_issues
for issue_id in all_issues:
if not any(errors[issue_id]):
continue
r, confidence, is_sign = pearson_corr(transitions, errors[issue_id])
if r is not None and drop is not None and is_sign:
lost_conversions = int(r * affected_sessions[issue_id])
else:
lost_conversions = None
if r is None:
r = 0
issues_dict['significant' if is_sign else 'insignificant'].append({
"type": all_issues[issue_id]["issue_type"],
"title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
"affected_sessions": affected_sessions[issue_id],
"unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
"lost_conversions": lost_conversions,
"affected_users": affected_users_dict[issue_id],
"conversion_impact": round(r * 100),
"context_string": all_issues[issue_id]["context"],
"issue_id": issue_id
})
if is_sign:
n_critical_issues += n_issues_dict[issue_id]
# To limit the number of returned issues to the frontend
issues_dict["significant"] = issues_dict["significant"][:20]
issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues
def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id):
output = []
stages = filter_d.events
# TODO: handle 1 stage alone
if len(stages) == 0:
logging.debug("no stages found")
return output, 0
elif len(stages) == 1:
# TODO: count sessions, and users for single stage
output = [{
"type": stages[0].type,
"value": stages[0].value,
"dropPercentage": None,
"operator": stages[0].operator,
"sessionsCount": 0,
"dropPct": 0,
"usersCount": 0,
"dropDueToIssues": 0
}]
# original
# counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# first change
# counts = sessions.search_sessions(data=schemas.FlatSessionsSearchPayloadSchema.parse_obj(filter_d),
# project_id=project_id, user_id=None, count_only=True)
# last change
counts = sessions.search_sessions(data=schemas.SessionsSearchPayloadSchema.model_validate(filter_d),
project_id=project_id, user_id=None, count_only=True)
output[0]["sessionsCount"] = counts["countSessions"]
output[0]["usersCount"] = counts["countUsers"]
return output, 0
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
return get_stages(stages, []), 0
# Obtain the first part of the output
stages_list = get_stages(stages, rows)
# Obtain the second part of the output
total_drop_due_to_issues = get_issues(stages, rows,
first_stage=1,
last_stage=len(filter_d.events),
drop_only=True)
return stages_list, total_drop_due_to_issues
def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_stage=None, last_stage=None):
output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
stages = filter_d.events
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
return output
# Obtain the second part of the output
n_critical_issues, issues_dict, total_drop_due_to_issues = get_issues(stages, rows, first_stage=first_stage,
last_stage=last_stage)
output['total_drop_due_to_issues'] = total_drop_due_to_issues
# output['critical_issues_count'] = n_critical_issues
output = {**output, **issues_dict}
return output
from .significance import *
# TODO: use clickhouse for funnels


@ -1,4 +1,5 @@
import json
import logging
import secrets
from decouple import config
@ -7,12 +8,14 @@ from starlette import status
import schemas
from chalicelib.core import authorizers, metadata, projects
from chalicelib.core import roles
from chalicelib.core import tenants, assist
from chalicelib.utils import email_helper, smtp
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core import roles
logger = logging.getLogger(__name__)
def __generate_invitation_token():
@ -861,7 +864,7 @@ def authenticate_sso(email, internal_id, exp=None):
jwt_jti=jwt_r_jti),
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int),
}
logger.warning(f"SSO user not found with email: {email} and internal_id: {internal_id}")
return None


@ -10,17 +10,18 @@ from starlette.datastructures import FormData
if config("ENABLE_SSO", cast=bool, default=True):
from onelogin.saml2.auth import OneLogin_Saml2_Auth
API_PREFIX = "/api"
SAML2 = {
"strict": config("saml_strict", cast=bool, default=True),
"debug": config("saml_debug", cast=bool, default=True),
"sp": {
"entityId": config("SITE_URL") + "/api/sso/saml2/metadata/",
"entityId": config("SITE_URL") + API_PREFIX + "/sso/saml2/metadata/",
"assertionConsumerService": {
"url": config("SITE_URL") + "/api/sso/saml2/acs/",
"url": config("SITE_URL") + API_PREFIX + "/sso/saml2/acs/",
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
},
"singleLogoutService": {
"url": config("SITE_URL") + "/api/sso/saml2/sls/",
"url": config("SITE_URL") + API_PREFIX + "/sso/saml2/sls/",
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
},
"NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
@ -110,8 +111,8 @@ async def prepare_request(request: Request):
# add / to /acs
if not path.endswith("/"):
path = path + '/'
if not path.startswith("/api"):
path = "/api" + path
if len(API_PREFIX) > 0 and not path.startswith(API_PREFIX):
path = API_PREFIX + path
return {
'https': 'on' if proto == 'https' else 'off',
@ -136,7 +137,13 @@ def get_saml2_provider():
config("idp_name", default="saml2")) > 0 else None
def get_landing_URL(jwt):
def get_landing_URL(jwt, redirect_to_link2=False):
if redirect_to_link2:
if len(config("sso_landing_override", default="")) == 0:
logging.warning("SSO trying to redirect to custom URL, but sso_landing_override env var is empty")
else:
return config("sso_landing_override") + "?jwt=%s" % jwt
return config("SITE_URL") + config("sso_landing", default="/login?jwt=%s") % jwt


@ -41,8 +41,13 @@ class ClickHouseClient:
keys = tuple(x for x, y in results[1])
return [dict(zip(keys, i)) for i in results[0]]
except Exception as err:
logging.error("--------- CH EXCEPTION -----------")
logging.error(err)
logging.error("--------- CH QUERY EXCEPTION -----------")
logging.error(self.format(query=query, params=params))
logging.error(self.format(query=query, params=params)
.replace('\n', '\\n')
.replace(' ', ' ')
.replace(' ', ' '))
logging.error("--------------------")
raise err


@ -48,6 +48,7 @@ rm -rf ./chalicelib/core/saved_search.py
rm -rf ./chalicelib/core/sessions.py
rm -rf ./chalicelib/core/sessions_assignments.py
rm -rf ./chalicelib/core/sessions_mobs.py
rm -rf ./chalicelib/core/significance.py
rm -rf ./chalicelib/core/socket_ios.py
rm -rf ./chalicelib/core/sourcemaps.py
rm -rf ./chalicelib/core/sourcemaps_parser.py


@ -16,12 +16,15 @@ mkdir .venv
# Installing dependencies (pipenv will detect the .venv folder and use it as a target)
pipenv install -r requirements.txt [--skip-lock]
# These commands must be used every time you make changes to FOSS.
# To clean the unused files before getting new ones
bash clean.sh
# To copy common files from FOSS
bash prepare-dev.sh
# In case of an issue with python3-saml installation for MacOS,
# please follow these instructions:
https://github.com/xmlsec/python-xmlsec/issues/254#issuecomment-1726249435
```
### Building and deploying locally


@ -20,8 +20,8 @@ apscheduler==3.10.4
clickhouse-driver[lz4]==0.2.6
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
#python3-saml==1.16.0 --no-binary=lxml
python3-saml==1.16.0
python3-saml==1.16.0 --no-binary=lxml
#python3-saml==1.16.0
python-multipart==0.0.6
redis==5.0.1


@ -514,8 +514,7 @@ def edit_note(projectId: int, noteId: int, data: schemas.SessionUpdateNoteSchema
@app.delete('/{projectId}/notes/{noteId}', tags=["sessions", "notes"],
dependencies=[OR_scope(Permissions.session_replay)])
def delete_note(projectId: int, noteId: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)):
data = sessions_notes.delete(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
note_id=noteId)
data = sessions_notes.delete(project_id=projectId, note_id=noteId)
return data


@ -1,9 +1,11 @@
import json
import logging
from fastapi import HTTPException, Request, Response, status
from chalicelib.utils import SAML2_helper
from chalicelib.utils.SAML2_helper import prepare_request, init_saml_auth
from routers.base import get_routers
import logging
logger = logging.getLogger(__name__)
@ -18,11 +20,11 @@ from starlette.responses import RedirectResponse
@public_app.get("/sso/saml2", tags=["saml2"])
@public_app.get("/sso/saml2/", tags=["saml2"])
async def start_sso(request: Request):
async def start_sso(request: Request, iFrame: bool = False):
request.path = ''
req = await prepare_request(request=request)
auth = init_saml_auth(req)
sso_built_url = auth.login()
sso_built_url = auth.login(return_to=json.dumps({'iFrame': iFrame}))
return RedirectResponse(url=sso_built_url)
@ -33,6 +35,29 @@ async def process_sso_assertion(request: Request):
session = req["cookie"]["session"]
auth = init_saml_auth(req)
post_data = req.get("post_data")
if post_data is None:
post_data = {}
elif isinstance(post_data, str):
post_data = json.loads(post_data)
elif not isinstance(post_data, dict):
logger.error("Received invalid post_data")
logger.error("type: {}".format(type(post_data)))
logger.error(post_data)
post_data = {}
redirect_to_link2 = None
relay_state = post_data.get('RelayState')
if relay_state:
if isinstance(relay_state, str):
relay_state = json.loads(relay_state)
elif not isinstance(relay_state, dict):
logger.error("Received invalid relay_state")
logger.error("type: {}".format(type(relay_state)))
logger.error(relay_state)
relay_state = {}
redirect_to_link2 = relay_state.get("iFrame")
request_id = None
if 'AuthNRequestID' in session:
request_id = session['AuthNRequestID']
@ -111,7 +136,7 @@ async def process_sso_assertion(request: Request):
refresh_token_max_age = jwt["refreshTokenMaxAge"]
response = Response(
status_code=status.HTTP_302_FOUND,
headers={'Location': SAML2_helper.get_landing_URL(jwt["jwt"])})
headers={'Location': SAML2_helper.get_landing_URL(jwt["jwt"], redirect_to_link2=redirect_to_link2)})
response.set_cookie(key="refreshToken", value=refresh_token, path="/api/refresh",
max_age=refresh_token_max_age, secure=True, httponly=True)
return response
@ -124,6 +149,29 @@ async def process_sso_assertion_tk(tenantKey: str, request: Request):
session = req["cookie"]["session"]
auth = init_saml_auth(req)
post_data = req.get("post_data")
if post_data is None:
post_data = {}
elif isinstance(post_data, str):
post_data = json.loads(post_data)
elif not isinstance(post_data, dict):
logger.error("Received invalid post_data")
logger.error("type: {}".format(type(post_data)))
logger.error(post_data)
post_data = {}
redirect_to_link2 = None
relay_state = post_data.get('RelayState')
if relay_state:
if isinstance(relay_state, str):
relay_state = json.loads(relay_state)
elif not isinstance(relay_state, dict):
logger.error("Received invalid relay_state")
logger.error("type: {}".format(type(relay_state)))
logger.error(relay_state)
relay_state = {}
redirect_to_link2 = relay_state.get("iFrame")
request_id = None
if 'AuthNRequestID' in session:
request_id = session['AuthNRequestID']
@ -194,9 +242,14 @@ async def process_sso_assertion_tk(tenantKey: str, request: Request):
jwt = users.authenticate_sso(email=email, internal_id=internal_id, exp=expiration)
if jwt is None:
return {"errors": ["null JWT"]}
return Response(
refresh_token = jwt["refreshToken"]
refresh_token_max_age = jwt["refreshTokenMaxAge"]
response = Response(
status_code=status.HTTP_302_FOUND,
headers={'Location': SAML2_helper.get_landing_URL(jwt)})
headers={'Location': SAML2_helper.get_landing_URL(jwt["jwt"], redirect_to_link2=redirect_to_link2)})
response.set_cookie(key="refreshToken", value=refresh_token, path="/api/refresh",
max_age=refresh_token_max_age, secure=True, httponly=True)
return response
@public_app.get('/sso/saml2/sls', tags=["saml2"])


@ -2,11 +2,11 @@ from fastapi import Body, Depends
from chalicelib.core.usability_testing import service
from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestUpdate, UTTestSearch
from or_dependencies import OR_context, OR_role
from or_dependencies import OR_context
from routers.base import get_routers
from schemas import schemas
public_app, app, app_apikey = get_routers(extra_dependencies=[OR_role("owner", "admin")])
public_app, app, app_apikey = get_routers()
tags = ["usability-tests"]


@ -34,7 +34,7 @@ class CurrentContext(schemas.CurrentContext):
if values.get("permissions") is not None:
perms = []
for p in values["permissions"]:
if Permissions.has_value(p):
if Permissions.has_value(p) or ServicePermissions.has_value(p):
perms.append(p)
values["permissions"] = perms
return values


@ -9,19 +9,20 @@
# Helper function
exit_err() {
err_code=$1
if [[ $err_code != 0 ]]; then
exit "$err_code"
fi
err_code=$1
if [[ $err_code != 0 ]]; then
exit "$err_code"
fi
}
source ../scripts/lib/_docker.sh
app="assist-stats" # Set the app variable to "chalice"
app="assist-stats" # Set the app variable to "chalice"
environment=$1
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-git_sha}
envarg="default-foss"
chart="$app" # Use the app variable here
chart="$app" # Use the app variable here
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -32,31 +33,31 @@ check_prereq() {
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
[[ $ee == "true" ]] && return
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
[[ $ee == "true" ]] && return
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api(){
function build_api() {
destination="_assist_stats"
[[ $1 == "ee" ]] && {
destination="_assist_stats_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../assist-stats ../${destination}
cd ../${destination} || exit_err 100
@ -86,5 +87,5 @@ check_prereq
build_api $environment
echo build_complete
if [[ $PATCH -eq 1 ]]; then
update_helm_release
update_helm_release
fi


@ -14,7 +14,7 @@ const {
socketsLiveByProject,
socketsLiveBySession,
autocomplete
} = require('../utils/httpHandlers-cluster');
} = require('../utils/httpHandlers');
const {createAdapter} = require("@socket.io/redis-adapter");
const {createClient} = require("redis");


@ -1,176 +0,0 @@
const {
hasFilters,
extractPeerId,
isValidSession,
sortPaginate,
getValidAttributes,
uniqueAutocomplete
} = require("./helper");
const {
extractProjectKeyFromRequest,
extractSessionIdFromRequest,
extractPayloadFromRequest,
getAvailableRooms
} = require("./extractors");
const {
IDENTITIES
} = require("./assistHelper");
const {
getServer
} = require('../utils/wsServer');
const {
RecordRequestDuration,
IncreaseTotalRequests
} = require('../utils/metrics');
const debug_log = process.env.debug === "1";
const respond = function (req, res, data) {
let result = {data}
if (process.env.uws !== "true") {
res.statusCode = 200;
res.setHeader('Content-Type', 'application/json');
res.end(JSON.stringify(result));
} else {
res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
}
const duration = performance.now() - req.startTs;
IncreaseTotalRequests();
RecordRequestDuration(req.method.toLowerCase(), res.handlerName, 200, duration/1000.0);
}
const socketsListByProject = async function (req, res) {
debug_log && console.log("[WS]looking for available sessions");
res.handlerName = 'socketsListByProject';
let io = getServer();
let _projectKey = extractProjectKeyFromRequest(req);
let _sessionId = extractSessionIdFromRequest(req);
if (_sessionId === undefined) {
return respond(req, res, null);
}
let filters = await extractPayloadFromRequest(req, res);
let connected_sockets = await io.in(_projectKey + '-' + _sessionId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo
&& isValidSession(item.handshake.query.sessionInfo, filters.filter)) {
return respond(req, res, _sessionId);
}
}
respond(req, res, null);
}
const socketsLiveByProject = async function (req, res) {
debug_log && console.log("[WS]looking for available LIVE sessions");
res.handlerName = 'socketsLiveByProject';
let io = getServer();
let _projectKey = extractProjectKeyFromRequest(req);
let _sessionId = extractSessionIdFromRequest(req);
let filters = await extractPayloadFromRequest(req, res);
let withFilters = hasFilters(filters);
let liveSessions = new Set();
const sessIDs = new Set();
let rooms = await getAvailableRooms(io);
for (let roomId of rooms.keys()) {
let {projectKey, sessionId} = extractPeerId(roomId);
if (projectKey === _projectKey && (_sessionId === undefined || _sessionId === sessionId)) {
let connected_sockets = await io.in(roomId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session) {
if (withFilters) {
if (item.handshake.query.sessionInfo &&
isValidSession(item.handshake.query.sessionInfo, filters.filter) &&
!sessIDs.has(item.handshake.query.sessionInfo.sessionID)
) {
liveSessions.add(item.handshake.query.sessionInfo);
sessIDs.add(item.handshake.query.sessionInfo.sessionID);
}
} else {
if (!sessIDs.has(item.handshake.query.sessionInfo.sessionID)) {
liveSessions.add(item.handshake.query.sessionInfo);
sessIDs.add(item.handshake.query.sessionInfo.sessionID);
}
}
}
}
}
}
let sessions = Array.from(liveSessions);
respond(req, res, _sessionId === undefined ? sortPaginate(sessions, filters) : sessions.length > 0 ? sessions[0] : null);
}
const socketsLiveBySession = async function (req, res) {
debug_log && console.log("[WS]looking for LIVE session");
res.handlerName = 'socketsLiveBySession';
let io = getServer();
let _projectKey = extractProjectKeyFromRequest(req);
let _sessionId = extractSessionIdFromRequest(req);
if (_sessionId === undefined) {
return respond(req, res, null);
}
let filters = await extractPayloadFromRequest(req, res);
let withFilters = hasFilters(filters);
let liveSessions = new Set();
const sessIDs = new Set();
let connected_sockets = await io.in(_projectKey + '-' + _sessionId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session) {
if (withFilters) {
if (item.handshake.query.sessionInfo &&
isValidSession(item.handshake.query.sessionInfo, filters.filter) &&
!sessIDs.has(item.handshake.query.sessionInfo.sessionID)
) {
liveSessions.add(item.handshake.query.sessionInfo);
sessIDs.add(item.handshake.query.sessionInfo.sessionID);
}
} else {
if (!sessIDs.has(item.handshake.query.sessionInfo.sessionID)) {
liveSessions.add(item.handshake.query.sessionInfo);
sessIDs.add(item.handshake.query.sessionInfo.sessionID);
}
}
}
}
let sessions = Array.from(liveSessions);
respond(req, res, sessions.length > 0 ? sessions[0] : null);
}
const autocomplete = async function (req, res) {
debug_log && console.log("[WS]autocomplete");
res.handlerName = 'autocomplete';
let io = getServer();
let _projectKey = extractProjectKeyFromRequest(req);
let filters = await extractPayloadFromRequest(req);
let results = [];
if (filters.query && Object.keys(filters.query).length > 0) {
let rooms = await getAvailableRooms(io);
for (let roomId of rooms.keys()) {
let {projectKey} = extractPeerId(roomId);
if (projectKey === _projectKey) {
let connected_sockets = await io.in(roomId).fetchSockets();
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
results = [...results, ...getValidAttributes(item.handshake.query.sessionInfo, filters.query)];
}
}
}
}
}
respond(req, res, uniqueAutocomplete(results));
}
module.exports = {
respond,
socketsListByProject,
socketsLiveByProject,
socketsLiveBySession,
autocomplete
}


@ -50,10 +50,10 @@ async function postData(payload) {
function startAssist(socket, agentID) {
const tsNow = +new Date();
const eventID = `${socket.sessId}_${agentID}_assist_${tsNow}`;
const eventID = `${socket.handshake.query.sessId}_${agentID}_assist_${tsNow}`;
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "assist",
@ -61,20 +61,20 @@ function startAssist(socket, agentID) {
"timestamp": tsNow,
});
// Save uniq eventID to cache
cache.set(`${socket.sessId}_${agentID}_assist`, eventID);
cache.set(`${socket.handshake.query.sessId}_${agentID}_assist`, eventID);
// Debug log
debug && console.log(`assist_started, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${tsNow}`);
debug && console.log(`assist_started, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
}
function endAssist(socket, agentID) {
const eventID = cache.get(`${socket.sessId}_${agentID}_assist`);
const eventID = cache.get(`${socket.handshake.query.sessId}_${agentID}_assist`);
if (eventID === undefined) {
debug && console.log(`have to skip assist_ended, no eventID in the cache, agentID: ${socket.agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}`);
debug && console.log(`have to skip assist_ended, no eventID in the cache, agentID: ${socket.handshake.query.agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}`);
return
}
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "assist",
@ -82,17 +82,17 @@ function endAssist(socket, agentID) {
"timestamp": +new Date(),
})
// Remove eventID from cache
cache.delete(`${socket.sessId}_${agentID}_assist`);
cache.delete(`${socket.handshake.query.sessId}_${agentID}_assist`);
// Debug logs
debug && console.log(`assist_ended, agentID: ${socket.agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}`);
debug && console.log(`assist_ended, agentID: ${socket.handshake.query.agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}`);
}
function startCall(socket, agentID) {
const tsNow = +new Date();
const eventID = `${socket.sessId}_${agentID}_call_${tsNow}`;
const eventID = `${socket.handshake.query.sessId}_${agentID}_call_${tsNow}`;
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "call",
@ -100,102 +100,102 @@ function startCall(socket, agentID) {
"timestamp": tsNow,
});
// Save uniq eventID to cache
cache.set(`${socket.sessId}_call`, eventID);
cache.set(`${socket.handshake.query.sessId}_call`, eventID);
// Debug logs
debug && console.log(`s_call_started, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${tsNow}`);
debug && console.log(`s_call_started, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
}
function endCall(socket, agentID) {
const tsNow = +new Date();
const eventID = cache.get(`${socket.sessId}_call`);
const eventID = cache.get(`${socket.handshake.query.sessId}_call`);
if (eventID === undefined) {
debug && console.log(`have to skip s_call_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${tsNow}`);
debug && console.log(`have to skip s_call_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
return
}
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "call",
"event_state": "end",
"timestamp": tsNow,
});
cache.delete(`${socket.sessId}_call`)
cache.delete(`${socket.handshake.query.sessId}_call`)
// Debug logs
debug && console.log(`s_call_ended, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${tsNow}`);
debug && console.log(`s_call_ended, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
}
function startControl(socket, agentID) {
const tsNow = +new Date();
const eventID = `${socket.sessId}_${agentID}_control_${tsNow}`;
const eventID = `${socket.handshake.query.sessId}_${agentID}_control_${tsNow}`;
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "control",
"event_state": "start",
"timestamp": tsNow,
});
cache.set(`${socket.sessId}_control`, eventID)
cache.set(`${socket.handshake.query.sessId}_control`, eventID)
// Debug logs
debug && console.log(`s_control_started, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${+new Date()}`);
debug && console.log(`s_control_started, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${+new Date()}`);
}
function endControl(socket, agentID) {
const tsNow = +new Date();
const eventID = cache.get(`${socket.sessId}_control`);
const eventID = cache.get(`${socket.handshake.query.sessId}_control`);
if (eventID === undefined) {
debug && console.log(`have to skip s_control_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${tsNow}`);
debug && console.log(`have to skip s_control_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
return
}
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "control",
"event_state": "end",
"timestamp": tsNow,
});
cache.delete(`${socket.sessId}_control`)
cache.delete(`${socket.handshake.query.sessId}_control`)
// Debug logs
debug && console.log(`s_control_ended, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${+new Date()}`);
debug && console.log(`s_control_ended, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${+new Date()}`);
}
function startRecord(socket, agentID) {
const tsNow = +new Date();
const eventID = `${socket.sessId}_${agentID}_record_${tsNow}`;
const eventID = `${socket.handshake.query.sessId}_${agentID}_record_${tsNow}`;
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "record",
"event_state": "start",
"timestamp": tsNow,
});
cache.set(`${socket.sessId}_record`, eventID)
cache.set(`${socket.handshake.query.sessId}_record`, eventID)
// Debug logs
debug && console.log(`s_recording_started, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${+new Date()}`);
debug && console.log(`s_recording_started, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${+new Date()}`);
}
function endRecord(socket, agentID) {
const tsNow = +new Date();
const eventID = cache.get(`${socket.sessId}_record`);
void postData({
"project_id": socket.projectId,
"session_id": socket.sessId,
"project_id": socket.handshake.query.projectId,
"session_id": socket.handshake.query.sessId,
"agent_id": agentID,
"event_id": eventID,
"event_type": "record",
"event_state": "end",
"timestamp": tsNow,
});
cache.delete(`${socket.sessId}_record`)
cache.delete(`${socket.handshake.query.sessId}_record`)
// Debug logs
debug && console.log(`s_recording_ended, agentID: ${agentID}, sessID: ${socket.sessId}, projID: ${socket.projectId}, time: ${+new Date()}`);
debug && console.log(`s_recording_ended, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${+new Date()}`);
}
function handleEvent(eventName, socket, agentID) {


@ -7,6 +7,54 @@ const getServer = function () {
return io;
}
let redisClient;
const useRedis = process.env.redis === "true";
if (useRedis) {
const {createClient} = require("redis");
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
redisClient = createClient({url: REDIS_URL});
redisClient.on("error", (error) => console.error(`Redis error : ${error}`));
void redisClient.connect();
}
const processSocketsList = function (sockets) {
let res = []
for (let socket of sockets) {
let {handshake} = socket;
res.push({handshake});
}
return res
}
const doFetchAllSockets = async function () {
if (useRedis) {
try {
let cachedResult = await redisClient.get('fetchSocketsResult');
if (cachedResult) {
return JSON.parse(cachedResult);
}
let result = await io.fetchSockets();
let cachedString = JSON.stringify(processSocketsList(result));
await redisClient.set('fetchSocketsResult', cachedString, {EX: 5});
return result;
} catch (error) {
console.error('Error setting value with expiration:', error);
}
}
return await io.fetchSockets();
}
const fetchSockets = async function (roomID) {
if (!io) {
return [];
}
if (!roomID) {
return await doFetchAllSockets();
}
return await io.in(roomID).fetchSockets();
}
const createSocketIOServer = function (server, prefix) {
if (io) {
return io;
@ -41,4 +89,5 @@ const createSocketIOServer = function (server, prefix) {
module.exports = {
createSocketIOServer,
getServer,
fetchSockets,
}


@ -1,59 +1,67 @@
package main
import (
"log"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/sessions"
"context"
config "openreplay/backend/internal/config/connector"
"openreplay/backend/internal/connector"
saver "openreplay/backend/pkg/connector"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/terminator"
)
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
ctx := context.Background()
log := logger.New()
cfg := config.New()
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("can't init object storage: %s", err)
log.Fatal(ctx, "can't init object storage: %s", err)
}
batches, err := saver.NewBatches(cfg, objStore)
if err != nil {
log.Fatal(ctx, "can't init s3 buckets: %s", err)
}
var db saver.Database
switch cfg.ConnectorType {
case "redshift":
if db, err = saver.NewRedshift(cfg, objStore); err != nil {
log.Fatalf("can't init redshift connection: %s", err)
if db, err = saver.NewRedshift(log, cfg, batches); err != nil {
log.Fatal(ctx, "can't init redshift connection: %s", err)
}
case "clickhouse":
if db, err = saver.NewClickHouse(cfg); err != nil {
log.Fatalf("can't init clickhouse connection: %s", err)
if db, err = saver.NewClickHouse(log, cfg, batches); err != nil {
log.Fatal(ctx, "can't init clickhouse connection: %s", err)
}
case "s3":
if db, err = saver.NewS3Storage(log, cfg, batches); err != nil {
log.Fatal(ctx, "can't init s3 connection: %s", err)
}
default:
log.Fatalf("unknown connector type: %s", cfg.ConnectorType)
log.Fatal(ctx, "unknown connector type: %s", cfg.ConnectorType)
}
defer db.Close()
// Init postgres connection
pgConn, err := pool.New(cfg.Postgres.String())
if err != nil {
log.Printf("can't init postgres connection: %s", err)
return
log.Fatal(ctx, "can't init postgres connection: %s", err)
}
defer pgConn.Close()
// Init redis connection
redisClient, err := redis.New(&cfg.Redis)
if err != nil {
log.Printf("can't init redis connection: %s", err)
log.Warn(ctx, "can't init redis connection: %s", err)
}
defer redisClient.Close()
@@ -61,7 +69,7 @@ func main() {
sessManager := sessions.New(pgConn, projManager, redisClient)
// Saves messages to Redshift
dataSaver := saver.New(cfg, db, sessManager, projManager)
dataSaver := saver.New(log, cfg, db, sessManager, projManager)
// Message filter
msgFilter := []int{messages.MsgConsoleLog, messages.MsgCustomEvent, messages.MsgJSException,
@@ -69,7 +77,13 @@ func main() {
messages.MsgSessionStart, messages.MsgSessionEnd, messages.MsgConnectionInformation,
messages.MsgMetadata, messages.MsgPageEvent, messages.MsgPerformanceTrackAggr, messages.MsgUserID,
messages.MsgUserAnonymousID, messages.MsgJSException, messages.MsgJSExceptionDeprecated,
messages.MsgInputEvent, messages.MsgMouseClick, messages.MsgIssueEventDeprecated}
messages.MsgInputEvent, messages.MsgMouseClick, messages.MsgIssueEventDeprecated, messages.MsgInputChange,
// Mobile messages
messages.MsgIOSSessionStart, messages.MsgIOSSessionEnd, messages.MsgIOSUserID, messages.MsgIOSUserAnonymousID,
messages.MsgIOSMetadata, messages.MsgIOSEvent, messages.MsgIOSNetworkCall,
messages.MsgIOSClickEvent, messages.MsgIOSSwipeEvent, messages.MsgIOSInputEvent,
messages.MsgIOSCrash, messages.MsgIOSIssueEvent,
}
// Init consumer
consumer := queue.NewConsumer(
@@ -86,12 +100,11 @@ func main() {
// Init memory manager
memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
if err != nil {
log.Printf("can't init memory manager: %s", err)
return
log.Fatal(ctx, "can't init memory manager: %s", err)
}
// Run service and wait for TERM signal
service := connector.New(cfg, consumer, dataSaver, memoryManager)
log.Printf("Connector service started\n")
log.Info(ctx, "Connector service started")
terminator.Wait(service)
}
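
After this refactor, main.go treats Redshift, ClickHouse and the new S3 target as interchangeable implementations of saver.Database, each fed by the shared Batches helper. The interface itself is not part of this compare view; judging from the call sites above and the backends below, it is presumably close to the following sketch (method set inferred, possibly incomplete):

package saver

// Database is the contract main.go's switch relies on.
// Inferred from the call sites in this diff, not copied from the repository.
type Database interface {
	InsertSessions(batch []map[string]string) error
	InsertEvents(batch []map[string]string) error
	Close() error
}

// Compile-time checks that the backends shown in this diff satisfy the contract:
// var _ Database = (*Redshift)(nil)
// var _ Database = (*ClickHouse)(nil)
// var _ Database = (*S3Storage)(nil)

Adding another sink is then a matter of implementing these three methods and adding one more case to the switch.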


@@ -2,22 +2,25 @@ package connector
import (
"context"
"log"
"strconv"
"strings"
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"openreplay/backend/internal/config/connector"
"openreplay/backend/pkg/logger"
)
type ClickHouse struct {
cfg *connector.Config
conn driver.Conn
log logger.Logger
cfg *connector.Config
conn driver.Conn
batches *Batches
}
func NewClickHouse(cfg *connector.Config) (*ClickHouse, error) {
func NewClickHouse(log logger.Logger, cfg *connector.Config, batches *Batches) (*ClickHouse, error) {
url := cfg.Clickhouse.URL
url = strings.TrimPrefix(url, "tcp://")
url = strings.TrimSuffix(url, "/default")
@@ -43,22 +46,30 @@ func NewClickHouse(cfg *connector.Config) (*ClickHouse, error) {
return nil, err
}
c := &ClickHouse{
cfg: cfg,
conn: conn,
log: log,
cfg: cfg,
conn: conn,
batches: batches,
}
return c, nil
}
func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
return c.insertEventsUsingBuffer(batch)
}
const eventsSQL = "INSERT INTO connector_events_buffer (sessionid, consolelog_level, consolelog_value, customevent_name, customevent_payload, jsexception_message, jsexception_name, jsexception_payload, jsexception_metadata, networkrequest_type, networkrequest_method, networkrequest_url, networkrequest_request, networkrequest_response, networkrequest_status, networkrequest_timestamp, networkrequest_duration, issueevent_message_id, issueevent_timestamp, issueevent_type, issueevent_context_string, issueevent_context, issueevent_payload, issueevent_url, customissue_name, customissue_payload, received_at, batch_order_number) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
func (c *ClickHouse) insertEventsUsingBuffer(batch []map[string]string) error {
bulk, err := c.conn.PrepareBatch(context.Background(), eventsSQL)
if err != nil {
return err
}
for _, event := range batch {
ctx := context.Background()
ctx = context.WithValue(ctx, "sessionID", c.Uint64(ctx, event["sessionid"]))
if err := bulk.Append(
Uint64(event["sessionid"]),
c.Uint64(ctx, event["sessionid"]),
nullableString(event["consolelog_level"]),
nullableString(event["consolelog_value"]),
nullableString(event["customevent_name"]),
@@ -72,11 +83,11 @@ func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
nullableString(event["networkrequest_url"]),
nullableString(event["networkrequest_request"]),
nullableString(event["networkrequest_response"]),
nullableUint64(event["networkrequest_status"]),
nullableUint64(event["networkrequest_timestamp"]),
nullableUint64(event["networkrequest_duration"]),
c.nullableUint64(ctx, event["networkrequest_status"]),
c.nullableUint64(ctx, event["networkrequest_timestamp"]),
c.nullableUint64(ctx, event["networkrequest_duration"]),
nullableString(event["issueevent_message_id"]),
nullableUint64(event["issueevent_timestamp"]),
c.nullableUint64(ctx, event["issueevent_timestamp"]),
nullableString(event["issueevent_type"]),
nullableString(event["issueevent_context_string"]),
nullableString(event["issueevent_context"]),
@@ -84,59 +95,65 @@ func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
nullableString(event["issueevent_url"]),
nullableString(event["customissue_name"]),
nullableString(event["customissue_payload"]),
nullableUint64(event["received_at"]),
nullableUint64(event["batch_order_number"]),
c.nullableUint64(ctx, event["received_at"]),
c.nullableUint64(ctx, event["batch_order_number"]),
); err != nil {
log.Printf("can't append value set to batch, err: %s", err)
c.log.Error(ctx, "can't append value set to batch, err: ", err)
}
}
return bulk.Send()
}
func (c *ClickHouse) InsertSessions(batch []map[string]string) error {
return c.insertSessionsUsingBuffer(batch)
}
const sessionsSQL = "INSERT INTO connector_user_sessions_buffer (sessionid, user_agent, user_browser, user_browser_version, user_country, user_device, user_device_heap_size, user_device_memory_size, user_device_type, user_os, user_os_version, user_uuid, connection_effective_bandwidth, connection_type, referrer, user_anonymous_id, user_id, session_start_timestamp, session_end_timestamp, session_duration, first_contentful_paint, speed_index, visually_complete, timing_time_to_interactive, avg_cpu, avg_fps, max_cpu, max_fps, max_total_js_heap_size, max_used_js_heap_size, js_exceptions_count, inputs_count, clicks_count, issues_count, pages_count, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
func (c *ClickHouse) InsertSessions(batch []map[string]string) error {
func (c *ClickHouse) insertSessionsUsingBuffer(batch []map[string]string) error {
bulk, err := c.conn.PrepareBatch(context.Background(), sessionsSQL)
if err != nil {
return err
}
for _, sess := range batch {
ctx := context.Background()
ctx = context.WithValue(ctx, "sessionID", c.Uint64(ctx, sess["sessionid"]))
if err := bulk.Append(
Uint64(sess["sessionid"]),
c.Uint64(ctx, sess["sessionid"]),
nullableString(sess["user_agent"]),
nullableString(sess["user_browser"]),
nullableString(sess["user_browser_version"]),
nullableString(sess["user_country"]),
nullableString(sess["user_device"]),
nullableUint64(sess["user_device_heap_size"]),
nullableUint64(sess["user_device_memory_size"]),
c.nullableUint64(ctx, sess["user_device_heap_size"]),
c.nullableUint64(ctx, sess["user_device_memory_size"]),
nullableString(sess["user_device_type"]),
nullableString(sess["user_os"]),
nullableString(sess["user_os_version"]),
nullableString(sess["user_uuid"]),
nullableUint64(sess["connection_effective_bandwidth"]),
c.nullableUint64(ctx, sess["connection_effective_bandwidth"]),
nullableString(sess["connection_type"]),
nullableString(sess["referrer"]),
nullableString(sess["user_anonymous_id"]),
nullableString(sess["user_id"]),
nullableUint64(sess["session_start_timestamp"]),
nullableUint64(sess["session_end_timestamp"]),
nullableUint64(sess["session_duration"]),
nullableUint64(sess["first_contentful_paint"]),
nullableUint64(sess["speed_index"]),
nullableUint64(sess["visually_complete"]),
nullableUint64(sess["timing_time_to_interactive"]),
nullableUint64(sess["avg_cpu"]),
nullableUint64(sess["avg_fps"]),
nullableUint64(sess["max_cpu"]),
nullableUint64(sess["max_fps"]),
nullableUint64(sess["max_total_js_heap_size"]),
nullableUint64(sess["max_used_js_heap_size"]),
nullableUint64(sess["js_exceptions_count"]),
nullableUint64(sess["inputs_count"]),
nullableUint64(sess["clicks_count"]),
nullableUint64(sess["issues_count"]),
nullableUint64(sess["pages_count"]),
c.nullableUint64(ctx, sess["session_start_timestamp"]),
c.nullableUint64(ctx, sess["session_end_timestamp"]),
c.nullableUint64(ctx, sess["session_duration"]),
c.nullableUint64(ctx, sess["first_contentful_paint"]),
c.nullableUint64(ctx, sess["speed_index"]),
c.nullableUint64(ctx, sess["visually_complete"]),
c.nullableUint64(ctx, sess["timing_time_to_interactive"]),
c.nullableUint64(ctx, sess["avg_cpu"]),
c.nullableUint64(ctx, sess["avg_fps"]),
c.nullableUint64(ctx, sess["max_cpu"]),
c.nullableUint64(ctx, sess["max_fps"]),
c.nullableUint64(ctx, sess["max_total_js_heap_size"]),
c.nullableUint64(ctx, sess["max_used_js_heap_size"]),
c.nullableUint64(ctx, sess["js_exceptions_count"]),
c.nullableUint64(ctx, sess["inputs_count"]),
c.nullableUint64(ctx, sess["clicks_count"]),
c.nullableUint64(ctx, sess["issues_count"]),
c.nullableUint64(ctx, sess["pages_count"]),
nullableString(sess["metadata_1"]),
nullableString(sess["metadata_2"]),
nullableString(sess["metadata_3"]),
@@ -148,7 +165,7 @@ func (c *ClickHouse) InsertSessions(batch []map[string]string) error {
nullableString(sess["metadata_9"]),
nullableString(sess["metadata_10"]),
); err != nil {
log.Printf("can't append value set to batch, err: %s", err)
c.log.Error(ctx, "can't append value set to batch, err: ", err)
}
}
return bulk.Send()
@@ -158,13 +175,13 @@ func (c *ClickHouse) Close() error {
return c.conn.Close()
}
func Uint64(v string) uint64 {
func (c *ClickHouse) Uint64(ctx context.Context, v string) uint64 {
if v == "" {
return 0
}
res, err := strconv.Atoi(v)
if err != nil {
log.Printf("can't convert string to uint64, err: %s", err)
c.log.Error(ctx, "can't convert string to uint64, err: %s", err)
return 0
}
return uint64(res)
@@ -178,12 +195,12 @@ func nullableString(v string) *string {
return p
}
func nullableUint64(v string) *uint64 {
func (c *ClickHouse) nullableUint64(ctx context.Context, v string) *uint64 {
var p *uint64 = nil
if v != "" {
res, err := strconv.Atoi(v)
if err != nil {
log.Printf("can't convert string to uint64, err: %s", err)
c.log.Error(ctx, "can't convert string to uint64, err: %s", err)
return nil
}
a := uint64(res)
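
The ClickHouse writer leans on clickhouse-go v2's batching API: prepare one batch against the _buffer table, Append a row per event, then Send everything in a single round trip. A stripped-down sketch of that flow follows; the table, column set and conversion helper are hypothetical stand-ins for the connector's own.

package main

import (
	"context"
	"log"
	"strconv"

	"github.com/ClickHouse/clickhouse-go/v2"
)

// toUint64 mirrors the connector's lenient conversion: empty or malformed
// values fall back to zero instead of failing the whole batch.
func toUint64(v string) uint64 {
	if v == "" {
		return 0
	}
	n, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return 0
	}
	return n
}

func main() {
	ctx := context.Background()
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"localhost:9000"}})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// One prepared batch per flush: Append stages rows client-side,
	// Send ships the whole batch to the buffer table in a single insert.
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO demo_events_buffer (sessionid, consolelog_value)")
	if err != nil {
		log.Fatal(err)
	}
	rows := []map[string]string{{"sessionid": "42", "consolelog_value": "hello"}}
	for _, row := range rows {
		if err := batch.Append(toUint64(row["sessionid"]), row["consolelog_value"]); err != nil {
			log.Printf("can't append row: %s", err)
		}
	}
	if err := batch.Send(); err != nil {
		log.Fatal(err)
	}
}

The connector's real code differs mainly in the width of the column list and in routing conversion errors through its structured logger.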


@@ -4,10 +4,11 @@ import "strconv"
var sessionColumns = []string{
"sessionid",
"user_agent",
"user_browser",
"user_browser_version",
"user_country",
"user_city",
"user_state",
"user_device",
"user_device_heap_size",
"user_device_memory_size",
@@ -20,6 +21,8 @@ var sessionColumns = []string{
"referrer",
"user_anonymous_id",
"user_id",
"tracker_version",
"rev_id",
"session_start_timestamp",
"session_end_timestamp",
"session_duration",
@@ -98,8 +101,77 @@ var eventColumns = []string{
"issueevent_url",
"customissue_name",
"customissue_payload",
"received_at",
"batch_order_number",
"mobile_event_name",
"mobile_event_payload",
"mobile_networkcall_type",
"mobile_networkcall_method",
"mobile_networkcall_url",
"mobile_networkcall_request",
"mobile_networkcall_response",
"mobile_networkcall_status",
"mobile_networkcall_timestamp",
"mobile_networkcall_duration",
"mobile_clickevent_x",
"mobile_clickevent_y",
"mobile_clickevent_timestamp",
"mobile_clickevent_label",
"mobile_swipeevent_x",
"mobile_swipeevent_y",
"mobile_swipeevent_timestamp",
"mobile_swipeevent_label",
"mobile_inputevent_label",
"mobile_inputevent_value",
"mobile_crash_name",
"mobile_crash_reason",
"mobile_crash_stacktrace",
"mobile_issueevent_timestamp",
"mobile_issueevent_type",
"mobile_issueevent_context_string",
"mobile_issueevent_context",
"mobile_issueevent_payload",
"mouseclick_label",
"mouseclick_selector",
"mouseclick_url",
"mouseclick_hesitation_time",
"mouseclick_timestamp",
"pageevent_url",
"pageevent_referrer",
"pageevent_speed_index",
"pageevent_timestamp",
"inputevent_label",
"inputevent_hesitation_time",
"inputevent_input_duration",
"inputevent_timestamp",
"mobile_viewcomponentevent_screen_name",
"mobile_viewcomponentevent_view_name",
"mobile_viewcomponentevent_visible",
"mobile_viewcomponentevent_timestamp",
}
var eventInts = []string{
"networkrequest_status",
"networkrequest_timestamp",
"networkrequest_duration",
"issueevent_message_id",
"issueevent_timestamp",
"mouseclick_hesitation_time",
"mouseclick_timestamp",
"pageevent_speed_index",
"pageevent_timestamp",
"inputevent_hesitation_time",
"inputevent_input_duration",
"inputevent_timestamp",
"mobile_networkcall_status",
"mobile_networkcall_timestamp",
"mobile_networkcall_duration",
"mobile_clickevent_x",
"mobile_clickevent_y",
"mobile_clickevent_timestamp",
"mobile_swipeevent_x",
"mobile_swipeevent_y",
"mobile_swipeevent_timestamp",
"mobile_issueevent_timestamp",
"mobile_viewcomponentevent_timestamp",
}
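
These column slices presumably fix the field order the shared Batches writer uses when it flattens a batch of map[string]string rows into a pipe-delimited file, the same layout the removed eventsToBuffer/sessionsToBuffer helpers in the Redshift changes below built by hand. A minimal sketch of that serialization (the rowsToBuffer name is hypothetical):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// rowsToBuffer flattens rows into a header line followed by one
// pipe-delimited line per record, fields emitted in exactly the order
// the column slice dictates.
func rowsToBuffer(rows []map[string]string, columns []string) *bytes.Buffer {
	buf := bytes.NewBuffer(nil)
	buf.WriteString(strings.Join(columns, "|"))
	for _, row := range rows {
		buf.WriteString("\n")
		fields := make([]string, len(columns))
		for i, column := range columns {
			fields[i] = row[column] // absent keys become empty fields
		}
		buf.WriteString(strings.Join(fields, "|"))
	}
	return buf
}

func main() {
	rows := []map[string]string{{"sessionid": "42", "user_id": "demo"}}
	fmt.Println(rowsToBuffer(rows, []string{"sessionid", "user_id"}).String())
}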
func QUOTES(s string) string {


@@ -1,27 +1,25 @@
package connector
import (
"bytes"
"context"
"database/sql"
"fmt"
"github.com/google/uuid"
"log"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/internal/config/connector"
_ "github.com/lib/pq"
"openreplay/backend/internal/config/connector"
"openreplay/backend/pkg/logger"
)
type Redshift struct {
cfg *connector.Config
ctx context.Context
db *sql.DB
objStorage objectstorage.ObjectStorage
log logger.Logger
cfg *connector.Config
ctx context.Context
db *sql.DB
batches *Batches
}
func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage) (*Redshift, error) {
func NewRedshift(log logger.Logger, cfg *connector.Config, batches *Batches) (*Redshift, error) {
var source string
if cfg.ConnectionString != "" {
source = cfg.ConnectionString
@@ -29,7 +27,7 @@ func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage)
source = fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
cfg.Redshift.User, cfg.Redshift.Password, cfg.Redshift.Host, cfg.Redshift.Port, cfg.Redshift.Database)
}
log.Println("Connecting to Redshift Source: ", source)
log.Info(context.Background(), "Connecting to Redshift Source: ", source)
sqldb, err := sql.Open("postgres", source)
if err != nil {
return nil, err
@@ -38,121 +36,68 @@ func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage)
return nil, err
}
return &Redshift{
cfg: cfg,
ctx: context.Background(),
db: sqldb,
objStorage: objStorage,
log: log,
cfg: cfg,
ctx: context.Background(),
db: sqldb,
batches: batches,
}, nil
}
func eventsToBuffer(batch []map[string]string) *bytes.Buffer {
buf := bytes.NewBuffer(nil)
// Write header
for _, column := range eventColumns {
buf.WriteString(column + "|")
func (r *Redshift) InsertSessions(batch []map[string]string) error {
fileName := generateName(r.cfg.SessionsTableName)
if err := r.batches.Insert(batch, fileName, sessionColumns); err != nil {
return fmt.Errorf("can't insert sessions batch: %s", err)
}
buf.Truncate(buf.Len() - 1)
// Write data
for _, event := range batch {
buf.WriteString("\n")
for _, column := range eventColumns {
buf.WriteString(event[column] + "|")
}
buf.Truncate(buf.Len() - 1)
// Copy data from s3 bucket to redshift
if err := r.copy(r.cfg.SessionsTableName, fileName, "|", true, false); err != nil {
return fmt.Errorf("can't copy data from s3 to redshift: %s", err)
}
return buf
r.log.Info(context.Background(), "sessions batch of %d sessions is successfully saved", len(batch))
return nil
}
func (r *Redshift) InsertEvents(batch []map[string]string) error {
// Send data to S3
fileName := fmt.Sprintf("connector_data/%s-%s.csv", r.cfg.EventsTableName, uuid.New().String())
// Create csv file
buf := eventsToBuffer(batch)
reader := bytes.NewReader(buf.Bytes())
if err := r.objStorage.Upload(reader, fileName, "text/csv", objectstorage.NoCompression); err != nil {
log.Printf("can't upload file to s3: %s", err)
return err
fileName := generateName(r.cfg.EventsTableName)
if err := r.batches.Insert(batch, fileName, eventColumns); err != nil {
return fmt.Errorf("can't insert events batch: %s", err)
}
// Copy data from s3 bucket to redshift
if err := r.Copy(r.cfg.EventsTableName, fileName, "|", true, false); err != nil {
log.Printf("can't copy data from s3 to redshift: %s", err)
return err
if err := r.copy(r.cfg.EventsTableName, fileName, "|", true, false); err != nil {
return fmt.Errorf("can't copy data from s3 to redshift: %s", err)
}
log.Printf("events batch of %d events is successfully saved", len(batch))
r.log.Info(context.Background(), "events batch of %d events is successfully saved", len(batch))
return nil
}
func sessionsToBuffer(batch []map[string]string) *bytes.Buffer {
buf := bytes.NewBuffer(nil)
// Write header
for _, column := range sessionColumns {
buf.WriteString(column + "|")
}
buf.Truncate(buf.Len() - 1)
// Write data
for _, sess := range batch {
buf.WriteString("\n")
for _, column := range sessionColumns {
buf.WriteString(sess[column] + "|")
}
buf.Truncate(buf.Len() - 1)
}
return buf
}
func (r *Redshift) InsertSessions(batch []map[string]string) error {
// Send data to S3
fileName := fmt.Sprintf("connector_data/%s-%s.csv", r.cfg.SessionsTableName, uuid.New().String())
// Create csv file
buf := sessionsToBuffer(batch)
reader := bytes.NewReader(buf.Bytes())
if err := r.objStorage.Upload(reader, fileName, "text/csv", objectstorage.NoCompression); err != nil {
log.Printf("can't upload file to s3: %s", err)
return err
}
// Copy data from s3 bucket to redshift
if err := r.Copy(r.cfg.SessionsTableName, fileName, "|", true, false); err != nil {
log.Printf("can't copy data from s3 to redshift: %s", err)
return err
}
log.Printf("sessions batch of %d sessions is successfully saved", len(batch))
return nil
}
func (r *Redshift) Copy(tableName, fileName, delimiter string, creds, gzip bool) error {
func (r *Redshift) copy(tableName, fileName, delimiter string, creds, gzip bool) error {
var (
credentials string
gzipSQL string
)
if creds {
credentials = fmt.Sprintf(`ACCESS_KEY_ID '%s' SECRET_ACCESS_KEY '%s'`, r.cfg.AWSAccessKeyID, r.cfg.AWSSecretAccessKey)
if r.cfg.AWSAccessKeyID != "" && r.cfg.AWSSecretAccessKey != "" {
credentials = fmt.Sprintf(`ACCESS_KEY_ID '%s' SECRET_ACCESS_KEY '%s'`, r.cfg.AWSAccessKeyID, r.cfg.AWSSecretAccessKey)
} else if r.cfg.AWSIAMRole != "" {
credentials = fmt.Sprintf(`IAM_ROLE '%s'`, r.cfg.AWSIAMRole)
} else {
credentials = "IAM_ROLE default"
}
}
if gzip {
gzipSQL = "GZIP"
}
bucketName := "rdshftbucket"
filePath := fmt.Sprintf("s3://%s/%s", bucketName, fileName)
filePath := fmt.Sprintf("s3://%s/%s", r.cfg.Redshift.Bucket, fileName)
copySQL := fmt.Sprintf(`COPY "%s" FROM '%s' WITH %s TIMEFORMAT 'auto' DATEFORMAT 'auto' TRUNCATECOLUMNS
STATUPDATE ON %s DELIMITER AS '%s' IGNOREHEADER 1 REMOVEQUOTES ESCAPE TRIMBLANKS EMPTYASNULL ACCEPTANYDATE`,
copySQL := fmt.Sprintf(`COPY "%s" FROM '%s' WITH %s TIMEFORMAT 'auto' DATEFORMAT 'auto' TRUNCATECOLUMNS STATUPDATE ON %s DELIMITER AS '%s' IGNOREHEADER 1 REMOVEQUOTES ESCAPE TRIMBLANKS EMPTYASNULL ACCEPTANYDATE`,
tableName, filePath, gzipSQL, credentials, delimiter)
log.Printf("Running command: %s", copySQL)
r.log.Debug(context.Background(), "Executing COPY SQL: %s", copySQL)
_, err := r.db.ExecContext(r.ctx, copySQL)
return err
}
func (r *Redshift) ExecutionDuration(fileName string) (int, error) {
return 0, nil
}
func (r *Redshift) Close() error {
return r.db.Close()
}
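
The renamed copy helper now falls back from static keys to an explicit IAM role to Redshift's default role, and reads the bucket from cfg.Redshift.Bucket rather than a hardcoded name. The generated statement ends up roughly like the output of the sketch below; the table, bucket and file names are placeholders, and gzip is assumed on for brevity rather than driven by a flag as in the real code.

package main

import "fmt"

// buildCopySQL (illustrative) mirrors the credentials fallback introduced in
// this commit: static keys win, then an explicit IAM role, then the cluster's
// default role.
func buildCopySQL(table, bucket, file, delimiter, accessKey, secretKey, iamRole string) string {
	credentials := "IAM_ROLE default"
	if accessKey != "" && secretKey != "" {
		credentials = fmt.Sprintf("ACCESS_KEY_ID '%s' SECRET_ACCESS_KEY '%s'", accessKey, secretKey)
	} else if iamRole != "" {
		credentials = fmt.Sprintf("IAM_ROLE '%s'", iamRole)
	}
	return fmt.Sprintf(`COPY "%s" FROM 's3://%s/%s' WITH GZIP TIMEFORMAT 'auto' DATEFORMAT 'auto' TRUNCATECOLUMNS STATUPDATE ON %s DELIMITER AS '%s' IGNOREHEADER 1 REMOVEQUOTES ESCAPE TRIMBLANKS EMPTYASNULL ACCEPTANYDATE`,
		table, bucket, file, credentials, delimiter)
}

func main() {
	fmt.Println(buildCopySQL("connector_events", "my-connector-bucket", "connector_data/demo.csv", "|", "", "", ""))
}

With empty keys and no role configured, the default-role branch keeps the statement valid for clusters that rely on an attached default IAM role.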


@@ -0,0 +1,47 @@
package connector
import (
"context"
"fmt"
"openreplay/backend/internal/config/connector"
"openreplay/backend/pkg/logger"
)
type S3Storage struct {
log logger.Logger
cfg *connector.Config
ctx context.Context
batches *Batches
}
func NewS3Storage(log logger.Logger, cfg *connector.Config, buckets *Batches) (*S3Storage, error) {
return &S3Storage{
log: log,
cfg: cfg,
ctx: context.Background(),
batches: buckets,
}, nil
}
func (ds *S3Storage) InsertSessions(batch []map[string]string) error {
fileName := generateName(ds.cfg.SessionsTableName)
if err := ds.batches.Insert(batch, fileName, sessionColumns); err != nil {
return fmt.Errorf("can't insert sessions batch: %s", err)
}
ds.log.Info(context.Background(), "sessions batch of %d sessions is successfully saved", len(batch))
return nil
}
func (ds *S3Storage) InsertEvents(batch []map[string]string) error {
fileName := generateName(ds.cfg.EventsTableName)
if err := ds.batches.Insert(batch, fileName, eventColumns); err != nil {
return fmt.Errorf("can't insert events batch: %s", err)
}
ds.log.Info(context.Background(), "events batch of %d events is successfully saved", len(batch))
return nil
}
func (ds *S3Storage) Close() error {
return nil
}
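
generateName, which both the Redshift and S3 backends call, lives in a file that is not shown in this compare view. Judging from the inline naming the old Redshift-only path used, it presumably produces a unique object key per flushed batch, along the lines of this guess (not the actual helper):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// generateName (assumed shape): one unique CSV object key per flushed batch,
// modeled on the "connector_data/<table>-<uuid>.csv" naming the removed
// Redshift code built inline.
func generateName(tableName string) string {
	return fmt.Sprintf("connector_data/%s-%s.csv", tableName, uuid.New().String())
}

func main() {
	fmt.Println(generateName("connector_events"))
}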

Some files were not shown because too many files have changed in this diff.