Compare commits

658 commits

Author SHA1 Message Date
Alexander Zavorotynskiy
1bf1999f65 fix(backend/db): fixed bug (index row size exceeds maximum) by adding left() func in sql requests 2022-06-17 16:23:55 +02:00
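
The "index row size exceeds maximum" error above is a Postgres limit: a btree index entry must fit in roughly a third of an 8 kB page (about 2.7 kB), so indexing long text values such as URLs can fail. A minimal Go sketch of the left() pattern the commit describes, using a hypothetical pages table and url column (not the actual OpenReplay schema):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver, assumed here for illustration
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/openreplay?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Index a left()-truncated expression instead of the raw text column,
	// so every index entry stays under the btree size limit.
	_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_pages_url
	                  ON pages (left(url, 2048))`)
	if err != nil {
		log.Fatal(err)
	}

	// Queries must use the same left() expression for the index to match.
	rows, err := db.Query(
		`SELECT session_id FROM pages WHERE left(url, 2048) = left($1, 2048)`,
		"https://example.com/very/long/path")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
}
```

Both the index definition and the queries have to use the identical left() expression, otherwise the planner cannot use the index.
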
rjshrjndrn
b993b1fefa chore(backend): clean go mod
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-17 16:23:55 +02:00
Shekar Siri
9867a5b8bc feat(ui) - sessions - widget - data reload 2022-06-17 16:19:03 +02:00
Taha Yassine Kraiem
e813cd593f feat(api): elasticsearch fixed typo 2022-06-17 16:19:03 +02:00
Taha Yassine Kraiem
41e3d9d351 feat(api): elasticsearch upgrade fix 2022-06-17 16:19:03 +02:00
Shekar Siri
f53ad1bbc4 feat(ui) - sessions - widget - pagination 2022-06-17 16:19:03 +02:00
Shekar Siri
506fefb6e1 feat(ui) - sessions - widget - pagination 2022-06-17 16:19:03 +02:00
Taha Yassine Kraiem
9e7e35769c Merge remote-tracking branch 'origin/api-v1.7.0' into dev 2022-06-17 12:57:03 +02:00
Taha Yassine Kraiem
c10140b8d1 feat(api): changed empty funnel response 2022-06-17 12:39:21 +02:00
Taha Yassine Kraiem
38b65537c7 feat(api): fixed Elasticsearch upgrade 2022-06-17 11:31:31 +02:00
rjshrjndrn
215d889782 ci(workers): build both ee and oss for deployment changes
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-17 11:01:52 +02:00
Taha Yassine Kraiem
1ee50b62ed feat(api): full dependencies upgrade 2022-06-17 10:53:43 +02:00
rjshrjndrn
a08ac6101a chore(helm): change nginx-ingress default lb to ewma
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-17 10:45:41 +02:00
Taha Yassine Kraiem
778db9af34 Merge remote-tracking branch 'origin/api-v1.7.0' into api-v1.7.0 2022-06-17 10:43:13 +02:00
Taha Yassine Kraiem
4d111d6f4a feat(db): migrate to v1.7.0: fixed cross-database references issue 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
7beb08f398 feat(db): migrate old funnels to new metric-funnels 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
891c7600a7 feat(api): custom metrics errors pagination
feat(api): custom metrics sessions pagination
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
9fb5e7c4d1 feat(api): fixed typo 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
f76c621350 feat(assist): support null&empty values for search
feat(assist): changed single-session search
feat(api): support null&empty values for live sessions search
feat(api): support key-mapping for different names
feat(api): support platform live-sessions search
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
6cc7372187 feat(api): support nested-key-sort for live sessions 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
4e22038137 feat(assist): changed pagination response
feat(assist): allow nested-key sort
feat(api): support new live sessions pagination response
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
2e5acdabc3 feat(assist): full autocomplete
feat(assist): solved endpoints conflicts
feat(api): live sessions full autocomplete
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
d1ef7ea1c7 feat(assist): full search
feat(api): live sessions full search
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
47fb100b4f feat(assist): fixed multiple values filter support for search 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
ab02495f63 feat(api): changed assist search payload 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
a59a8c0133 feat(assist): changed debug 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
bd9dbc9393 feat(assist): payload extraction debug 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
4fe3f87d46 feat(api): assist autocomplete 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
c0c1a86209 feat(assist): autocomplete 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
fbe37babbc feat(assist): sessions search handle nested objects 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
ccf951f8e4 feat(api): optimized live session check
feat(assist): optimized live session check
feat(assist): sort
feat(assist): pagination
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
0aa94bbc3c feat(assist): assist changed search payload 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
ef609aa196 feat(api): search live sessions 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
43184d5c43 feat(assist): assist refactored 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
c6a6a77e71 feat(assist): EE assist search 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
181195ffde feat(assist): assist refactored 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
58aea53101 feat(assist): assist upgrade uWebSockets
feat(assist): assist upgrade SocketIo
2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
03dbf42d11 feat(assist): FOSS assist search 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
7d4d0fadbd feat(api): requirements upgrade 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
5b1185b872 feat(api): metric-funnel changed response 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
c7c6cd2187 feat(api): metrics get sessions related to issue 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
1448cb45e9 feat(api): metrics table of errors 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
b4b3a6c26e feat(api): custom metrics fixed templates response 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
f296b27346 feat(api): optimised get issues for get session-details 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
531b112439 feat(api): fixed custom metrics timestamp issue 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
c68edbc705 feat(api): fixed login 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
7d4596c074 feat(api): get sessions details fix 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
03e0dbf0e4 feat(api): optimised get session details 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
46e7f5b83e feat(api): custom metrics config 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
bafae833d5 feat(api): limited long task DB 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
421a1f1104 feat(api): custom metrics config 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
405d83d4e0 feat(api): optimised weekly report 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
6c377bc4e5 feat(api): fixed login response 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
40d60f7769 feat(api): fixed login response 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
557d855ae5 feat(api): changed login response 2022-06-17 10:42:30 +02:00
Taha Yassine Kraiem
0dd7914375 feat(api): EE changed weekly report
feat(api): changed login response
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
63d2fce3b5 feat(api): fixed weekly report
feat(api): optimised weekly report
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
119ecd7743 feat(api): ignore weekly report if SMTP not configured 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
8aec595495 feat(api): changed connection pool configuration
feat(alerts): changed connection pool configuration
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
779c85dfda feat(api): changes 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
0fd7d1d80c feat(api): changes
feat(db): changes
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
5e85da6533 feat(api): changed pages_response_time_distribution response 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
26ce0c8e86 feat(api): changed crashes response 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
3f35b01a5e feat(api): changed speed_location response 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
597da9fc11 feat(api): changed speed_location response 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
fa7a57eb3f feat(api): changed slowest_domains response 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
23a98d83d7 feat(api): table of sessions widget 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
53fc845f9a feat(api): errors widget chart
feat(api): funnels widget chart
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
bf60c83f3b feat(api): errors widget 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
4912841a9e feat(api): funnel widget issues 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
8d49a588e4 feat(api): funnel widget 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
b5a646b233 feat(api): EE fixed edition 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
7d426ee79a feat(api): fixed notifications count query 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
06a52e505e feat(api): fixed edition
feat(api): fixed expiration date
feat(api): fixed change name
feat(api): fixed change role
feat(api): fixed has password
feat(api): refactored edit user
feat(api): refactored edit member
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
667fe3dd79 feat(db): removed user's appearance
feat(db): removed generated_password
feat(api): merged account&client
feat(api): cleaned account response
feat(api): removed user's appearance
feat(api): removed generated_password
feat(api): limits endpoint
feat(api): notifications/count endpoint
2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
d86ca3c7ec feat(db): EE CH new structure 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
e92f14dc17 feat(db): EE CH new structure 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
81503030e4 feat(db): EE CH new structure 2022-06-17 10:42:29 +02:00
Taha Yassine Kraiem
10f26ab45c feat(api): clean script 2022-06-17 10:42:27 +02:00
Taha Yassine Kraiem
5968b55934 feat(api): refactored user-auth 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
c2ea4fb4b6 feat(api): metrics changed web vitals description
feat(db): changed metric's monitoring essentials category to web vitals
2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
254202ba85 feat(api): fixed changed SearchSession payload schema 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
b2732eb9be feat(api): changed SearchSession payload schema 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
a3ba925cea feat(api): centralized 'order'
feat(api): transform 'order' casing
2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
20f7c0fb70 feat(DB): changed metrics category from Overview to Monitoring Essentials 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
9c9452c530 feat(api): upgraded python base image
feat(alerts): upgraded python base image
2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
c12cea6f6b feat(api): fixed CH client format 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
6c0aca2f8c feat(DB): changed partition expression 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
2ed54261b6 feat(api): fixed sourcemaps reader endpoint 2022-06-17 10:42:00 +02:00
Taha Yassine Kraiem
6bf5d1d65b feat(api): user trail limit changed 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
23584b8be8 feat(alerts): changed Dockerfile.alerts 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
f7002ab2a0 feat(api): vault support 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
2fba643b7c feat(api): changed search user trails by username 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
18f0d2fbca feat(api): search user trails by username
feat(db): index to search user trails by username
2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
9fcba8703e feat(api): EE updated authorizer 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
41d7d16d03 feat(api): changed Dockerfile 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
9100d27854 feat(api): changed root path 2022-06-17 10:41:59 +02:00
Taha Yassine Kraiem
507462180e feat(api): fixed return createdAt with the list of users 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
7f9bc99bcf feat(DB): traces/trails index
feat(api): get all possible traces/trails actions
feat(api): search traces/trails by actions
feat(api): search traces/trails by user
2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
e95c5b915d feat(api): return createdAt with the list of users 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
cf6320d4df feat(DB): traces/trails index
feat(api): get all traces/trails
2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
d9d2f08fb8 feat(DB): changed sessions_metadata sort expression 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
b0d3074ceb feat(api): changed Dockerfile 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
9c5d96e35c feat(api): changed Dockerfile 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
9af6fc004b feat(api): changed Dockerfile 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
1dcad02b9a feat(api): changed replay file URL 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
1859fb8a6c feat(api): EE updated dependencies 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
90143bcd31 feat(api): updated dependencies 2022-06-17 10:41:58 +02:00
Taha Yassine Kraiem
1224e6054e feat(api): fixed description optional value 2022-06-17 10:41:57 +02:00
Taha Yassine Kraiem
c715a6084e feat(api): fixed description default value 2022-06-17 10:41:57 +02:00
Taha Yassine Kraiem
1c671631e7 feat(api): changed Dockerfile 2022-06-17 10:41:57 +02:00
rjshrjndrn
ea103f9589 chore(vagrant): Adding development readme
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-17 10:41:55 +02:00
Rajesh Rajendran
32fdd80784 Vagrant for local contribution (#434)
* chore(vagrant): initial vagrantfile
* chore(vagrant): adding instructions after installation
* chore(vagrant): Adding vagrant user to docker group
* chore(vagrant): use local docker daemon for k3s
* chore(vagrant): fix comment
* chore(vagrant): adding hostname in /etc/hosts
* chore(vagrant): fix doc
* chore(vagrant): limiting cpu
* chore(frontend): initialize dev env
* chore(docker): adding dockerignore
* chore(dockerfile): using cache for faster builds
* chore(dockerignore): update
* chore(docker): build optimizations
* chore(build): all components build option
* chore(build): utilities build fix
* chore(script): remove debug message
* chore(vagrant): provision using stable branch always

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-17 10:40:57 +02:00
Taha Yassine Kraiem
c72120ac64 feat(api): s3 helper detect environment
feat(api): support description for dashboards
2022-06-17 10:39:48 +02:00
Taha Yassine Kraiem
1e6c6fa1a7 feat(db): EE remove pages_count column 2022-06-17 10:39:48 +02:00
Taha Yassine Kraiem
d45fd1634d feat(api): EE fixed No of pages count widget 2022-06-17 10:39:48 +02:00
Taha Yassine Kraiem
9ddc0e5e4a feat(api): merge dev 2022-06-17 10:39:30 +02:00
Taha Yassine Kraiem
e322e9c3d0 feat(api): round time metrics 2022-06-17 10:33:41 +02:00
Alexander Zavorotynskiy
a153547575 feat(backend/db): send metadata directly to db (removed from batches) 2022-06-17 09:34:58 +02:00
Taha Yassine Kraiem
f9695198f2 feat(db): migrate to v1.7.0: fixed cross-database references issue 2022-06-16 19:18:52 +02:00
Taha Yassine Kraiem
621b4aae7c feat(db): migrate old funnels to new metric-funnels 2022-06-16 19:12:06 +02:00
Taha Yassine Kraiem
734320cfe5 feat(api): custom metrics errors pagination
feat(api): custom metrics sessions pagination
2022-06-16 17:49:57 +02:00
Shekar Siri
441f792679 feat(ui) - assist filters with pagination 2022-06-16 16:49:00 +02:00
Shekar Siri
133714a4cb feat(ui) - assist filters with pagination 2022-06-16 16:49:00 +02:00
Taha Yassine Kraiem
33a3890562 feat(api): fixed typo 2022-06-16 16:34:02 +02:00
rjshrjndrn
1a5c50cefa fix(helm): removing unnecessary ingress rules
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-16 15:08:43 +02:00
rjshrjndrn
54b414e199 chore(helm): adding pvc to utilities
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-16 14:17:33 +02:00
Taha Yassine Kraiem
a3aa176e67 feat(assist): support null&empty values for search
feat(assist): changed single-session search
feat(api): support null&empty values for live sessions search
feat(api): support key-mapping for different names
feat(api): support platform live-sessions search
2022-06-16 14:02:20 +02:00
Alexander Zavorotynskiy
d837c14be4 feat(backend): start using analytics topic for heuristics and trigger topic only for sessionEnd between sink and storage 2022-06-16 14:00:50 +02:00
Taha Yassine Kraiem
96bf84b567 feat(api): support nested-key-sort for live sessions 2022-06-16 12:27:51 +02:00
Taha Yassine Kraiem
fe6a50dc2c feat(assist): changed pagination response
feat(assist): allow nested-key sort
feat(api): support new live sessions pagination response
2022-06-16 11:53:49 +02:00
rjshrjndrn
75504409e7 chore(helm): Adding utilities chart
Will contain OpenReplay utilities, such as:
- EFS cleaner
- Postgres backup trigger, etc.
2022-06-16 11:14:18 +02:00
Taha Yassine Kraiem
c254aab413 feat(assist): full autocomplete
feat(assist): solved endpoints conflicts
feat(api): live sessions full autocomplete
2022-06-15 22:44:41 +02:00
Taha Yassine Kraiem
c6b719b9fa feat(assist): full search
feat(api): live sessions full search
2022-06-15 21:56:59 +02:00
Taha Yassine Kraiem
2dbdfade10 feat(assist): fixed multiple values filter support for search 2022-06-15 20:24:32 +02:00
Taha Yassine Kraiem
31a53edd5a feat(api): changed assist search payload 2022-06-15 19:25:50 +02:00
Shekar Siri
6ba773fe6d Merge branch 'dev-assist-filters' into dev 2022-06-15 19:08:01 +02:00
Shekar Siri
6144a34d75 Merge branch 'dev-funnels' into dev 2022-06-15 19:07:45 +02:00
Taha Yassine Kraiem
dd2c51e3b6 feat(assist): changed debug 2022-06-15 19:05:07 +02:00
Shekar Siri
9e87909167 feat(ui) - issues and errors widgets 2022-06-15 18:56:16 +02:00
Taha Yassine Kraiem
cf80c46cd9 feat(assist): payload extraction debug 2022-06-15 18:45:31 +02:00
Shekar Siri
c2ca867fdc change(ui) - checking for user login 2022-06-15 18:43:55 +02:00
Taha Yassine Kraiem
c53ecbef00 feat(api): assist autocomplete 2022-06-15 17:22:43 +02:00
Taha Yassine Kraiem
38be085622 feat(assist): autocomplete 2022-06-15 17:15:02 +02:00
Shekar Siri
1e78a851c6 feat(ui) - assist filters wip 2022-06-15 16:46:09 +02:00
Shekar Siri
aa669d6a86 feat(ui) - assist filters wip 2022-06-15 16:20:35 +02:00
Taha Yassine Kraiem
8510949d29 feat(assist): sessions search handle nested objects 2022-06-15 16:03:37 +02:00
Alexander Zavorotynskiy
5ea482d4c2 feat(backend/http): removed second unnecessary request body read 2022-06-15 15:50:55 +02:00
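
http.Request.Body is a one-shot reader, so a second read sees EOF (or costs an extra copy if the body was re-wrapped). The usual fix is to read it once into a buffer and hand out fresh readers over those bytes. A generic sketch of that pattern, not the actual OpenReplay handler:

```go
package main

import (
	"bytes"
	"io"
	"log"
	"net/http"
)

// handler reads the request body exactly once and reuses the buffer for
// both size accounting and decoding.
func handler(w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "read error", http.StatusBadRequest)
		return
	}
	log.Printf("received %d bytes", len(body))

	// Reuse the same bytes wherever a reader is needed again.
	decodeFrom(bytes.NewReader(body))
}

func decodeFrom(r io.Reader) {
	// ... decode messages from r ...
	_, _ = io.Copy(io.Discard, r)
}

func main() {
	http.HandleFunc("/ingest", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
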
Shekar Siri
2fe2406d0c feat(ui) - assist filters wip 2022-06-15 15:29:29 +02:00
Taha Yassine Kraiem
d6070d1829 feat(api): optimized live session check
feat(assist): optimized live session check
feat(assist): sort
feat(assist): pagination
2022-06-15 15:05:41 +02:00
Shekar Siri
e5963fbeef feat(ui) - assist filters wip 2022-06-15 14:14:48 +02:00
Alexander Zavorotynskiy
56623f9635 feat(backend/db): added batch updates in web-stats methods 2022-06-15 13:20:37 +02:00
Alexander
3c6bd9613c
feat(backend): control batch size and number of sql requests in db service for more accurate management of data inserts (#540)
Co-authored-by: Alexander Zavorotynskiy <alexander@openreplay.com>
2022-06-15 12:57:09 +02:00
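
As a sketch of the batching control described in #540 (type names and limits here are illustrative, not the actual db-service code): flush a batch when either the statement count or the accumulated payload size crosses a threshold, so one SQL round trip carries many inserts without growing unboundedly.

```go
package main

import "fmt"

// Batch accumulates SQL statements and flushes when either the statement
// count or the total payload size crosses a configured limit.
type Batch struct {
	maxItems int
	maxBytes int
	items    []string
	bytes    int
	flush    func([]string)
}

func (b *Batch) Add(stmt string) {
	b.items = append(b.items, stmt)
	b.bytes += len(stmt)
	if len(b.items) >= b.maxItems || b.bytes >= b.maxBytes {
		b.Flush()
	}
}

func (b *Batch) Flush() {
	if len(b.items) == 0 {
		return
	}
	b.flush(b.items)
	b.items = b.items[:0]
	b.bytes = 0
}

func main() {
	b := &Batch{maxItems: 3, maxBytes: 1 << 20, flush: func(stmts []string) {
		fmt.Printf("flushing %d statements in one round trip\n", len(stmts))
	}}
	for i := 0; i < 7; i++ {
		b.Add(fmt.Sprintf("INSERT INTO events VALUES (%d)", i))
	}
	b.Flush() // flush the remaining tail
}
```
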
Alexander
6b5d9d3799
feat(backend): added new trigger which sink sends to storage after session end is received (#539)
Co-authored-by: Alexander Zavorotynskiy <alexander@openreplay.com>
2022-06-15 11:45:52 +02:00
Alexander
883a6f6909
Improved ender (#537)
* feat(backend/ender): using producer timestamp for session end detection

* feat(backend/ender): added timeControl module

Co-authored-by: Alexander Zavorotynskiy <alexander@openreplay.com>
2022-06-15 10:49:32 +02:00
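
The ender change in #537 swaps wall-clock timeouts for the producer timestamp carried on each message, so session-end detection stays correct when the consumer lags or replays. A rough sketch of that idea; the field names and the timeout value are illustrative:

```go
package main

import "fmt"

// sessionEnder detects ended sessions using the Kafka producer timestamp
// on each message rather than the consumer's wall clock.
type sessionEnder struct {
	lastSeen map[uint64]int64 // sessionID -> last producer ts (ms)
	timeout  int64            // inactivity window in ms
}

// Handle records activity for a session at the message's producer timestamp.
func (e *sessionEnder) Handle(sessionID uint64, producerTs int64) {
	if producerTs > e.lastSeen[sessionID] {
		e.lastSeen[sessionID] = producerTs
	}
}

// Sweep returns sessions whose last activity is older than the timeout,
// judged against the latest producer timestamp observed ("stream time").
func (e *sessionEnder) Sweep(streamTime int64) []uint64 {
	var ended []uint64
	for id, ts := range e.lastSeen {
		if streamTime-ts > e.timeout {
			ended = append(ended, id)
			delete(e.lastSeen, id)
		}
	}
	return ended
}

func main() {
	e := &sessionEnder{lastSeen: map[uint64]int64{}, timeout: 60_000}
	e.Handle(1, 1_000)
	e.Handle(2, 100_000)
	fmt.Println(e.Sweep(120_000)) // session 1 ended; session 2 still active
}
```
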
Taha Yassine Kraiem
b85f2abfd5 feat(assist): assist changed search payload 2022-06-14 20:12:03 +02:00
Taha Yassine Kraiem
a2ec909ace feat(api): search live sessions 2022-06-14 20:09:36 +02:00
Taha Yassine Kraiem
971dbd40a4 feat(assist): assist refactored 2022-06-14 19:42:16 +02:00
Taha Yassine Kraiem
1462f90925 feat(assist): EE assist search 2022-06-14 19:37:04 +02:00
Taha Yassine Kraiem
ded2d980fe feat(assist): assist refactored 2022-06-14 18:01:52 +02:00
Taha Yassine Kraiem
d4d029c525 feat(assist): assist upgrade uWebSockets
feat(assist): assist upgrade SocketIo
2022-06-14 18:01:34 +02:00
Taha Yassine Kraiem
40836092fa feat(assist): FOSS assist search 2022-06-14 17:19:58 +02:00
Mehdi Osman
911736f772
Increased Redis max queue length 2022-06-14 16:21:15 +02:00
Taha Yassine Kraiem
b8eac83662 feat(api): requirements upgrade 2022-06-14 15:07:39 +02:00
Taha Yassine Kraiem
d478436d9b feat(api): metric-funnel changed response 2022-06-14 14:56:46 +02:00
Shekar Siri
af7f751b42 feat(ui) - issues and errors widgets 2022-06-14 14:36:08 +02:00
rjshrjndrn
ec66bc03c6 chore(helm): enable compression for nginx
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-14 13:16:54 +02:00
Shekar Siri
7874dcbe0b feat(ui) - issues and errors widgets 2022-06-14 12:47:43 +02:00
Alexander Zavorotynskiy
3059227bcd feat(backend): turn on kafka delivery reports 2022-06-14 10:19:33 +02:00
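
Delivery reports are the Kafka producer's acknowledgement channel: with them on, every produced message yields an event carrying any broker-side error instead of failing silently. A minimal sketch with confluent-kafka-go (the Go client used here is an assumption, and the broker address and topic are placeholders):

```go
package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// "go.delivery.reports" makes the producer emit a delivery report for
	// every message on the Events() channel.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":   "localhost:9092",
		"go.delivery.reports": true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// Drain delivery reports; log any failed writes.
	go func() {
		for ev := range p.Events() {
			if m, ok := ev.(*kafka.Message); ok && m.TopicPartition.Error != nil {
				log.Printf("delivery failed: %v", m.TopicPartition.Error)
			}
		}
	}()

	topic := "messages"
	p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("payload"),
	}, nil)
	p.Flush(5000) // wait up to 5s for outstanding reports
}
```
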
Taha Yassine Kraiem
13d71ce388 feat(api): metrics get sessions related to issue 2022-06-13 19:56:27 +02:00
Taha Yassine Kraiem
09711d4521 feat(api): metrics table of errors 2022-06-13 19:26:00 +02:00
Taha Yassine Kraiem
50dce0ee9f feat(api): custom metrics fixed templates response 2022-06-13 19:20:16 +02:00
Taha Yassine Kraiem
2a12ed7337 feat(api): optimised get issues for get session-details 2022-06-13 18:24:03 +02:00
Shekar Siri
06855c41f4 feat(ui) - issues - widget 2022-06-13 17:39:27 +02:00
Shekar Siri
bea34112c9 feat(ui) - funnels - issue details 2022-06-13 17:35:31 +02:00
Shekar Siri
44d735d0a5 Merge branch 'funnels' into deb-funnels 2022-06-13 17:21:46 +02:00
Shekar Siri
88adec9e84 feat(ui) - funnels - issue details 2022-06-13 17:20:09 +02:00
Taha Yassine Kraiem
c856b2168d feat(api): fixed custom metrics timestamp issue 2022-06-13 16:07:56 +02:00
Taha Yassine Kraiem
85c27ff0f5 feat(api): fixed login 2022-06-13 15:59:54 +02:00
Taha Yassine Kraiem
d4c7fdcc5f feat(api): get sessions details fix 2022-06-13 15:24:21 +02:00
Shekar Siri
b26f2e87bf feat(ui) - funnels - issue details 2022-06-13 14:04:16 +02:00
Taha Yassine Kraiem
2b85ad3dfc feat(api): optimised get session details 2022-06-13 13:19:24 +02:00
Shekar Siri
4e2bcf26a4 feat(ui) - funnels - issue details 2022-06-13 12:32:13 +02:00
Shekar Siri
936d1f6f6e feat(ui) - funnels - details 2022-06-13 11:35:23 +02:00
Taha Yassine Kraiem
974f78b84a feat(api): custom metrics config 2022-06-10 17:51:47 +02:00
Taha Yassine Kraiem
36e5ba6389 feat(api): limited long task DB 2022-06-10 17:36:22 +02:00
Taha Yassine Kraiem
41b96321fe feat(api): custom metrics config 2022-06-10 17:19:51 +02:00
sylenien
fee99d3bf1 fix(ui): bugfixes 2022-06-10 17:11:14 +02:00
sylenien
43f52a9dcd fix(ui): fix couple ui bugs 2022-06-10 17:11:14 +02:00
dlrm
0c4b6ab6f0 fix(ui): fix styles 2022-06-10 17:11:14 +02:00
dlrm
c90a8d558a fix(ui): env? 2022-06-10 17:11:14 +02:00
dlrm
2bc44d038e fix(ui): fix env sample 2022-06-10 17:11:14 +02:00
dlrm
f745b9ba51 fix(ui): fix env sample 2022-06-10 17:11:14 +02:00
dlrm
f7eb848706 fix(ui): fixes after webpack update 2022-06-10 17:11:14 +02:00
dlrm
f08d8ca07e fix(ui): webpack 2022-06-10 17:11:14 +02:00
sylenien
aca4ef697e fix(ui): fix icon positioning on a timeline 2022-06-10 17:11:14 +02:00
sylenien
55f58487f5 fix(ui): fix performance tab graph mapper 2022-06-10 17:11:14 +02:00
sylenien
05c8bf4d59 fix(ui): red color changes, menu controls, performance crash 2022-06-10 17:11:14 +02:00
sylenien
997a5421ae fix(ui): small design fixes 2022-06-10 17:11:14 +02:00
sylenien
8a2d777d8c fix(ui): small fixes to share popup, archive inspector 2022-06-10 17:11:14 +02:00
sylenien
9caaabcacc fix(ui): move issues button to the subheader 2022-06-10 17:11:14 +02:00
sylenien
43a1991300 fix(ui): ui fixes 2022-06-10 17:11:14 +02:00
sylenien
c60b060cbe fix(ui): unblock tabs when in inspector mode, turn off inspector on tab change 2022-06-10 17:11:14 +02:00
sylenien
13dff716ea fix(ui): fix ui bugs 2022-06-10 17:11:14 +02:00
sylenien
366314193e fix(ui): design review fixes 2022-06-10 17:11:14 +02:00
sylenien
6e24da549a fix(ui): live session fixes 2022-06-10 17:11:14 +02:00
sylenien
02c87d237d feat(ui): change player control tabs designs 2022-06-10 17:11:14 +02:00
sylenien
b1d903f7f6 fix(ui): design fixes 2022-06-10 17:11:14 +02:00
sylenien
83600ee04d fix(ui): minor changes 2022-06-10 17:11:14 +02:00
sylenien
dbae4fe353 feat(ui): player controls redesign 2022-06-10 17:11:14 +02:00
sylenien
35d258aa8c fix(ui): design review fixes 2022-06-10 17:11:14 +02:00
sylenien
042571193a fix(ui): minor bugs 2022-06-10 17:11:14 +02:00
sylenien
3031569c07 fix(ui): ui fixes after design review 2022-06-10 17:11:14 +02:00
sylenien
9d06a95c7a fix(ui): fix active sessions 2022-06-10 17:11:14 +02:00
sylenien
ce5affddd6 fix(ui): fix styles in player header 2022-06-10 17:11:14 +02:00
sylenien
3444b73ed0 fix(ui): show events search by default 2022-06-10 17:11:14 +02:00
sylenien
2109808d61 fix(ui): fix tooltip for subheader 2022-06-10 17:11:14 +02:00
sylenien
197694be73 fix(ui): rm test code 2022-06-10 17:11:14 +02:00
sylenien
ff73c70bfd fix(ui): fix warnings for few components 2022-06-10 17:11:14 +02:00
sylenien
1e51e3bce8 feat(ui): change eventgroup sidebar 2022-06-10 17:11:14 +02:00
sylenien
5e296703b0 fix(ui): fix typo 2022-06-10 17:11:14 +02:00
sylenien
05ecce9c74 feat(ui): add urlref badge to subheader 2022-06-10 17:11:14 +02:00
sylenien
5f5f47b06b fix(ui): rm unused code 2022-06-10 17:11:14 +02:00
sylenien
e3099bf93d fix(ui): return subheader 2022-06-10 17:11:14 +02:00
sylenien
0ab16ce91c fix(ui): fix for cicd 2022-06-10 17:11:14 +02:00
sylenien
a7d032bb29 fix(ui): rename file 2022-06-10 17:11:14 +02:00
sylenien
6b34630fa1 fix(ui): minor bugfix 2022-06-10 17:11:14 +02:00
sylenien
c584b0f653 feat(ui): change events tab design, move action buttons to subheader 2022-06-10 17:11:14 +02:00
sylenien
aff6f54397 fix(ui): fix sessionlist modal 2022-06-10 17:11:14 +02:00
sylenien
3aac6cf130 feat(ui): redesign player header; move user data to header 2022-06-10 17:11:14 +02:00
Taha Yassine Kraiem
dc02594da8 feat(api): optimised weekly report 2022-06-10 16:31:08 +02:00
Taha Yassine Kraiem
e796e6c795 feat(api): fixed login response 2022-06-10 15:49:24 +02:00
Taha Yassine Kraiem
8d4d61103a feat(api): fixed login response 2022-06-10 15:44:05 +02:00
Taha Yassine Kraiem
3217a55bca feat(api): changed login response 2022-06-10 15:29:54 +02:00
Taha Yassine Kraiem
0886e3856a feat(api): EE changed weekly report
feat(api): changed login response
2022-06-10 12:33:36 +02:00
Taha Yassine Kraiem
5592e13d9b feat(api): fixed weekly report
feat(api): optimised weekly report
2022-06-10 12:31:29 +02:00
Taha Yassine Kraiem
4305e03745 feat(api): ignore weekly report if SMTP not configured 2022-06-10 11:53:47 +02:00
Taha Yassine Kraiem
e1b233bac8 feat(api): changed connection pool configuration
feat(alerts): changed connection pool configuration
2022-06-10 11:35:25 +02:00
Alexander Zavorotynskiy
ea658316a2 fix(backend): fixed panic in kafka consumer 2022-06-10 09:45:50 +02:00
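
A common shape for this kind of consumer fix is to confine each message handler behind recover(), so one malformed payload cannot take down the whole loop. This is a generic pattern sketch, not necessarily the actual fix in the commit:

```go
package main

import "log"

// safeConsume wraps one message-handling call so a panic in a handler is
// logged and skipped instead of crashing the consumer process.
func safeConsume(handle func([]byte), msg []byte) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("recovered from panic in handler: %v", r)
		}
	}()
	handle(msg)
}

func main() {
	handler := func(b []byte) {
		if len(b) == 0 {
			panic("empty message") // e.g. an unchecked nil/short payload
		}
		log.Printf("processed %d bytes", len(b))
	}
	safeConsume(handler, []byte("ok"))
	safeConsume(handler, nil) // recovered; the loop keeps running
}
```
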
Alexander Zavorotynskiy
b646ba2a9e fix(backend): fixed panic in db service 2022-06-10 09:31:54 +02:00
Taha Yassine Kraiem
b16b3e3b87 feat(api): changes 2022-06-09 17:37:49 +02:00
Taha Yassine Kraiem
656e13f6e5 feat(api): changes
feat(db): changes
2022-06-09 17:23:17 +02:00
Taha Yassine Kraiem
6e5bdae7da feat(api): changed pages_response_time_distribution response 2022-06-09 14:12:21 +02:00
Taha Yassine Kraiem
c81ce9bf7d feat(api): changed crashes response 2022-06-09 14:09:13 +02:00
Taha Yassine Kraiem
6e9e5dceb7 feat(api): changed speed_location response 2022-06-09 13:54:25 +02:00
Taha Yassine Kraiem
89b3d84230 feat(api): changed speed_location response 2022-06-09 13:53:55 +02:00
Taha Yassine Kraiem
9411f0f576 feat(api): changed slowest_domains response 2022-06-09 13:42:52 +02:00
dlrm
3b8a2c19ef fix(tracker): code style 2022-06-09 13:36:28 +02:00
dlrm
c913e4e7f6 fix(tracker): code review 2022-06-09 13:36:28 +02:00
dlrm
9158fa60c5 fix(tracker): fix tracker date recording, added new obscure dates opt
fix(tracker): rm consolelog

fix(tracker): change compile import

fix(tracker): fix node v and import
2022-06-09 13:36:28 +02:00
Taha Yassine Kraiem
7b1e854c53 feat(api): table of sessions widget 2022-06-09 13:13:05 +02:00
Taha Yassine Kraiem
adb8e2c404 feat(api): errors widget chart
feat(api): funnels widget chart
2022-06-08 19:03:06 +02:00
Taha Yassine Kraiem
6816dedaff feat(api): errors widget 2022-06-08 17:21:13 +02:00
Shekar Siri
a461ad0938 change(ui) - sessions daterange 2022-06-08 16:55:29 +02:00
Shekar Siri
4188b7894d change(ui) - tracking code changes 2022-06-08 16:29:00 +02:00
Shekar Siri
e652ee97ba pulled webpack changes and resolved conflicts 2022-06-08 16:16:41 +02:00
Shekar Siri
f235da44ab pulled webpack changes and resolved conflicts 2022-06-08 16:04:52 +02:00
Shekar Siri
767376a8db change(ui) - notifications count and list with mobx 2022-06-08 15:50:29 +02:00
Shekar Siri
d8911e93c1 change(ui) - notifications count and list 2022-06-08 15:50:29 +02:00
Shekar Siri
8273fc08bc change(ui) - login align 2022-06-08 15:50:29 +02:00
Alexander
e749ed1823
Merge pull request #531 from openreplay/assets_fix
Assets fix
2022-06-08 15:08:25 +02:00
Alexander Zavorotynskiy
2dccb2142b fix(backend/assets): return back cache checks in s3 2022-06-08 15:05:02 +02:00
Alexander Zavorotynskiy
404f6204e1 fix(backend/assets): copy ts and index in assets convert method 2022-06-08 14:44:16 +02:00
Alexander Zavorotynskiy
248d3b2c3d fix(backend/assets): changed comment 2022-06-08 13:17:37 +02:00
rjshrjndrn
9388e03e8c fix(ingress): assets ingress values
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-08 12:47:24 +02:00
Alexander Zavorotynskiy
c4081ce78a feat(backend/assets): disabled cache checks 2022-06-08 12:38:38 +02:00
Taha Yassine Kraiem
b2a778a0d7 feat(api): funnel widget issues 2022-06-07 20:10:40 +02:00
Taha Yassine Kraiem
1445c72737 feat(api): funnel widget 2022-06-07 19:17:55 +02:00
Taha Yassine Kraiem
734d1333a9 feat(api): EE fixed edition 2022-06-07 18:34:52 +02:00
Taha Yassine Kraiem
932c18f65a feat(api): fixed notifications count query 2022-06-07 18:18:22 +02:00
Taha Yassine Kraiem
3a70c8bef6 feat(api): fixed edition
feat(api): fixed expiration date
feat(api): fixed change name
feat(api): fixed change role
feat(api): fixed has password
feat(api): refactored edit user
feat(api): refactored edit member
2022-06-07 18:12:08 +02:00
rjshrjndrn
a996fac4d3 fix(ingress): assets path
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-07 17:31:51 +02:00
rjshrjndrn
8ce66d0ffc fix(build): frontend build command
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-07 16:42:15 +02:00
rjshrjndrn
4986708006 build(frontend): changed env file
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-07 15:46:50 +02:00
Shekar Siri
b1ce794c06 change(ui) - Tenant Key checking for ee edition 2022-06-07 15:41:36 +02:00
Shekar Siri
d8dcfe4b5e change(ui) - removed client and updated account 2022-06-07 15:41:36 +02:00
Shekar Siri
7a3b13ff8a change(ui) - Tenant Key checking for ee edition 2022-06-07 15:41:36 +02:00
Shekar Siri
c3d4470bb1 change(ui) - removed appearance 2022-06-07 15:41:36 +02:00
Alexander Zavorotynskiy
0b0798b0ef feat(backend/assets): added metric (total_assets) 2022-06-07 14:14:18 +02:00
Alexander Zavorotynskiy
9292d315c4 feat(backend/ender): removed debug log 2022-06-07 13:48:10 +02:00
Alexander Zavorotynskiy
7678e9d056 fix(backend/db): fixed loss of sessions 2022-06-07 13:44:20 +02:00
Alexander Zavorotynskiy
4f8c4358f8 fix(backend/storage): fixed panic in storage service 2022-06-07 13:30:48 +02:00
Shekar Siri
329ae62881 change(ui) - input class 2022-06-07 12:08:15 +02:00
Shekar Siri
65331ca016 change(ui) - code snippet 2022-06-07 12:04:43 +02:00
Shekar Siri
cb5809608a change(ui) - code snippet 2022-06-07 11:59:42 +02:00
Alexander Zavorotynskiy
78cf538b6b feat(backend): added metrics to storage and sink services 2022-06-07 10:12:42 +02:00
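
Go services typically expose such metrics with the Prometheus client: register counters and histograms, instrument the hot path, and serve /metrics. A sketch with illustrative metric names (not OpenReplay's actual ones):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Counter and histogram registered via promauto, exposed on /metrics.
var (
	sessionsSaved = promauto.NewCounter(prometheus.CounterOpts{
		Name: "storage_sessions_saved_total",
		Help: "Number of session files written to storage.",
	})
	saveDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Name:    "storage_save_duration_seconds",
		Help:    "Time spent writing a session file.",
		Buckets: prometheus.DefBuckets,
	})
)

func main() {
	// Instrument one unit of work.
	timer := prometheus.NewTimer(saveDuration)
	// ... write the session file ...
	timer.ObserveDuration()
	sessionsSaved.Inc()

	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8888", nil)
}
```
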
Taha Yassine Kraiem
cbe78cc58e feat(db): removed user's appearance
feat(db): removed generated_password
feat(api): merged account&client
feat(api): cleaned account response
feat(api): removed user's appearance
feat(api): removed generated_password
feat(api): limits endpoint
feat(api): notifications/count endpoint
2022-06-06 19:33:26 +02:00
Alexander Zavorotynskiy
a6db2cb602 feat(backend): added metrics to http service 2022-06-06 16:46:14 +02:00
Alexander Zavorotynskiy
c963b74cbf feat(backend): cleaned up in internal dir 2022-06-06 14:13:24 +02:00
Taha Yassine Kraiem
a6c75d3cdd Merge remote-tracking branch 'origin/dev' into api-v1.6.1
# Conflicts:
#	api/Dockerfile
#	api/development.md
#	backend/Dockerfile.bundle
#	backend/build.sh
#	backend/development.md
#	backend/internal/assets/jsexception.go
#	backend/internal/handlers/ios/performanceAggregator.go
#	backend/pkg/intervals/intervals.go
#	backend/pkg/log/queue.go
#	backend/pkg/messages/filters.go
#	backend/pkg/messages/legacy-message-transform.go
#	backend/pkg/messages/messages.go
#	backend/pkg/messages/read-message.go
#	backend/services/db/heuristics/anr.go
#	backend/services/db/heuristics/clickrage.go
#	backend/services/db/heuristics/heuristics.go
#	backend/services/db/heuristics/readyMessageStore.go
#	backend/services/db/heuristics/session.go
#	backend/services/db/stats.go
#	backend/services/ender/builder/builderMap.go
#	backend/services/ender/builder/clikRageDetector.go
#	backend/services/ender/builder/cpuIssueFinder.go
#	backend/services/ender/builder/deadClickDetector.go
#	backend/services/ender/builder/domDropDetector.go
#	backend/services/ender/builder/inputEventBuilder.go
#	backend/services/ender/builder/memoryIssueFinder.go
#	backend/services/ender/builder/pageEventBuilder.go
#	backend/services/ender/builder/performanceTrackAggrBuilder.go
#	backend/services/http/assets.go
#	backend/services/http/handlers-depricated.go
#	backend/services/http/ios-device.go
#	backend/services/integrations/clientManager/manager.go
#	backend/services/storage/gzip.go
#	backend/services/storage/main.go
#	ee/api/clean.sh
#	scripts/helmcharts/local_deploy.sh
#	scripts/helmcharts/vars.yaml
2022-06-03 17:06:25 +01:00
Taha Yassine Kraiem
31a577b6cc feat(db): EE CH new structure 2022-06-03 16:56:37 +01:00
Shekar Siri
a7bfbc8ff7 change(ui) - config changes 2022-06-03 17:18:17 +02:00
Shekar Siri
2ed5cac986
Webpack upgrade and dependency cleanup (#523)
* change(ui) - webpack update
* change(ui) - api optimize and other fixes
2022-06-03 16:47:38 +02:00
rjshrjndrn
f5e013329f chore(action): removing unnecessary file 2022-06-03 16:32:06 +02:00
Alexander Zavorotynskiy
d358747caf fix(backend): several fixes in backend services 2022-06-03 16:01:14 +02:00
Alex Kaminskii
d0e651bc29 fix(tracker): uncomment init scroll tracking 2022-06-03 14:19:39 +02:00
Alex Kaminskii
e57d90e5a1 fix(tracker): use node guards instead of instanceof in some cases; import type App 2022-06-03 14:17:53 +02:00
Alex Kaminskii
1495f3bc5d fix(backend/ee/kafka): Partition-wise back-commit 2022-06-03 13:52:31 +02:00
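
Partition-wise committing means tracking, per partition, the earliest offset still in flight and committing that, so a rebalance replays only unfinished work. A sketch of the idea on top of confluent-kafka-go (assumed client; how in-flight offsets are tracked is left to the caller):

```go
package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// commitBack commits, for each partition, the earliest offset that is still
// being processed ("back-commit").
func commitBack(c *kafka.Consumer, inFlight map[int32]kafka.Offset, topic string) {
	var tps []kafka.TopicPartition
	for partition, oldest := range inFlight {
		tps = append(tps, kafka.TopicPartition{
			Topic:     &topic,
			Partition: partition,
			Offset:    oldest, // committed offset = next message to (re)read
		})
	}
	if _, err := c.CommitOffsets(tps); err != nil {
		log.Printf("commit failed: %v", err)
	}
}

func main() {
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers":  "localhost:9092", // placeholder broker
		"group.id":           "sink",
		"enable.auto.commit": false,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	commitBack(c, map[int32]kafka.Offset{0: 42, 1: 100}, "messages")
}
```
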
rjshrjndrn
f626636ed7 chore(helm): enable cors for ingest
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-06-03 13:04:11 +02:00
rjshrjndrn
06eeabe494 chore(actions): enable build from branch 2022-06-03 12:48:24 +02:00
Alexander Zavorotynskiy
d68ac74731 feat(backend/http): added OPTIONS method for all paths 2022-06-03 11:13:56 +02:00
Alexander Zavorotynskiy
d4e5fce12a feat(backend/http): added prefix hack 2022-06-03 10:52:12 +02:00
Alex Kaminskii
7395688831 fix(backend/http): check if order of declaring gets influence 2022-06-02 19:04:48 +02:00
Eric Chan
c2695ef31f allow use of localStorage and sessionStorage to be overridden 2022-06-02 17:49:05 +02:00
Alexander Zavorotynskiy
1a8c076b41 fix(backend/http): added preflight headers to root 2022-06-02 17:39:38 +02:00
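
Preflight handling (see also the OPTIONS-for-all-paths commit above) means answering the browser's OPTIONS request with the CORS headers before any real request is sent. A generic net/http sketch; the allowed origin, methods, and headers are illustrative:

```go
package main

import "net/http"

// withCORS answers OPTIONS preflight requests on every path and attaches
// the CORS headers to normal responses.
func withCORS(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusOK) // preflight: headers only, no body
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", withCORS(mux))
}
```
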
Taha Yassine Kraiem
e7e0296b6b feat(db): EE CH new structure 2022-06-02 12:37:52 +01:00
Alexander Zavorotynskiy
2fb57962b8 feat(backend/sink): added last session ts in sink logs 2022-06-02 10:50:14 +02:00
Alexander Zavorotynskiy
485865f704 fix(backend/storage): fixed ts of last processed session in logs 2022-06-02 10:27:32 +02:00
Alexander Zavorotynskiy
2cadf12f88 feat(backend/storage): added counter and last session timestamp for storage service 2022-06-02 10:13:18 +02:00
Taha Yassine Kraiem
caaf7793e3 feat(db): EE CH new structure 2022-06-01 19:51:42 +01:00
rjshrjndrn
f330d5031f chore(helm): adding grafana ingress
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-31 19:21:45 +02:00
Taha Yassine Kraiem
95088518aa feat(api): clean script 2022-05-31 13:46:13 +01:00
Alexander Zavorotynskiy
3a4d5f6796 feat(backend/sink): added additional log on producer write operation 2022-05-31 14:43:56 +02:00
Taha Yassine Kraiem
b1aae16f60 feat(api): refactored user-auth 2022-05-31 10:14:55 +01:00
Alexander Zavorotynskiy
6e92ba2e79 feat(backend/ender): added additional log for ender service 2022-05-31 10:40:44 +02:00
Alexander Zavorotynskiy
df18e7dd7d feat(backend/storage): additional log and memory improvements in storage service 2022-05-31 10:02:31 +02:00
Alexander Zavorotynskiy
0b7bb2339d fix(backend/datasaver): changed postgres on clickhouse and added missed imports 2022-05-30 17:41:45 +02:00
rjshrjndrn
440efd1b5d chore(helm): increase health check timeout
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-30 17:30:35 +02:00
rjshrjndrn
6aaa0b5fb8 chore(helm): chalice health check
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-30 17:20:28 +02:00
Alexander Zavorotynskiy
d871558390 fix(backend/storage): fixed bug with large session files 2022-05-30 16:59:41 +02:00
Alexander Zavorotynskiy
24fdb5e18c fix(backend/http): fixed bug with aws health checks 2022-05-30 16:39:05 +02:00
ShiKhu
0f434a21d4 fix(tracker): 3.5.12: resolve Promise returning on start() with success:false instead of rejecting 2022-05-27 21:25:21 +02:00
ShiKhu
3555864580 fix(backend-db): log session-not-found only once 2022-05-27 12:55:15 +02:00
ShiKhu
edddf87e5f fix(frontend): resources status fix 2022-05-27 12:38:05 +02:00
Alexander Zavorotynskiy
0fe1b0c3a8 fix(backend/storage): fixed panic in storage service 2022-05-27 10:22:19 +02:00
Rajesh Rajendran
3a2b54a446
Fixes related to clickhouse and service Port for ingress (#510)
* chore(helm): variablizing clickhouse shards/replica

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(clickhouse): adding new template for clickhouse cluster

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(helm): enable passwordless clickhouse

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(install): check clickhouse is up prior to initialization

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(helm): port value for ingress
2022-05-25 16:53:24 +00:00
Rajesh Rajendran
55a0d3a0e0
chore(helm): enable serviceMonitor only if monitoring stack installed. (#509) 2022-05-25 16:11:09 +00:00
Rajesh Rajendran
2752118e94
fix(helm): clickhouse change port type to integer (#508)
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-25 16:00:13 +00:00
Rajesh Rajendran
c795e0480d
fix(helm): service port installation issue (#507)
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-25 15:58:48 +00:00
Alex Kaminskii
d7dc6e0860 fix(player): apply scrolls after styles 2022-05-25 15:24:10 +02:00
Rajesh Rajendran
2a870d6f74
chore(helm): enabling monitoring for services (#503) 2022-05-24 17:49:24 +00:00
Alexander Zavorotynskiy
a32ac65f35 feat(backend): additional logs in messageHandler 2022-05-24 16:27:35 +02:00
Alexander Zavorotynskiy
ca78bca3d1 chore(helmchart): added missed part of yaml file to sink helm chart 2022-05-24 13:39:53 +02:00
Alexander Zavorotynskiy
31c852df2b feat(backend/sink): added error log for consumer.Commit() method 2022-05-24 13:30:25 +02:00
Alexander Zavorotynskiy
204c6f589b feat(backend/sink): small changes 2022-05-24 13:24:00 +02:00
Alexander Zavorotynskiy
8647beb538 chore(helmchart): added ASSETS_ORIGIN to sink helm chart 2022-05-24 13:21:38 +02:00
Alexander
c6f54f18aa
Merge pull request #502 from openreplay/message_timestamp_changes
Message timestamp changes
2022-05-24 13:02:16 +02:00
Alexander Zavorotynskiy
c941cb872a feat(backend/messages): added timestamp for SessionStart and moved RawErrorEvent to db datasaver 2022-05-24 10:33:16 +02:00
Alexander Zavorotynskiy
d685ad4cb3 feat(backend/ender): implemented metrics module and added to ender service 2022-05-23 17:48:24 +02:00
Alexander Zavorotynskiy
d29416fd48 fix(backend): fixed bug with group name in heuristics service 2022-05-23 17:42:28 +02:00
sylenien
07072f74b0 fix(ui): fix text overflow 2022-05-23 11:05:03 +02:00
sylenien
a06fb42e12 fix(ui): fix bugs with metric updating, metric selection hover etc 2022-05-23 11:05:03 +02:00
sylenien
40ab7d1e41 fix(ui): minor fixes for sesson settings 2022-05-23 11:05:03 +02:00
sylenien
d4fa960fdf fix(ui): make dashboardeditModal closable with esc 2022-05-23 11:05:03 +02:00
sylenien
6a801a2026 fix(ui): make menuitem configurable 2022-05-23 11:05:03 +02:00
sylenien
af45af8bd0 fix(ui): design review - dashboard metric selection 2022-05-23 11:05:03 +02:00
sylenien
a489a8b77e fix(ui): design review - saved search 2022-05-23 11:05:03 +02:00
sylenien
144f596144 fix(ui): rm consolelog 2022-05-23 11:05:03 +02:00
sylenien
e47797ee3e fix(ui): minor ui fixes after review 2022-05-23 11:05:03 +02:00
sylenien
020b993280 fix(ui): fix description input focus 2022-05-23 11:05:03 +02:00
sylenien
4efe7a7843 feat(ui): add icon to metric creation box 2022-05-23 11:05:03 +02:00
Alex Kaminskii
30d6f2489c feat(tracker-assist): 3.5.11: RemoteControl: better scroll element detection; maintain react tight state input value 2022-05-20 22:38:13 +02:00
Alex Kaminskii
62e163fb40 fix(player-assist): ignore tab press during remote control 2022-05-20 22:26:22 +02:00
Alex Kaminskii
d30b663195 fix(player): use append() instead of add(); update lastMessageTime inside distributeMessage 2022-05-20 19:05:32 +02:00
Taha Yassine Kraiem
b5540998d9 feat(api): metrics changed web vitals description
feat(db): changed metric's monitoring essentials category to web vitals
2022-05-20 11:20:25 +02:00
rjshrjndrn
40e0296c8a docs(machine setup): for contribution
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-19 19:05:53 +02:00
rjshrjndrn
9526ea68aa chore(helm): clickhouse use kafka zookeeper
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-19 18:27:00 +02:00
Alex Kaminskii
18a09cf66b fix(frontend/player): codefix 2022-05-19 17:52:49 +02:00
Alex Kaminskii
cecd57fc50 fix(frontend): maintain string mobsUrl for the smooth version transition 2022-05-19 17:29:15 +02:00
Rajesh Rajendran
97094107fe
GH actions for ee (#488)
* chore(actions): changing installation method

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): inject ee license key and image tag

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): image tag overload

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-19 15:04:01 +00:00
Rajesh Rajendran
2e332f3447
Openreplay install, without kubernetes and related tools (#487)
* chore(init script): option to skip k8s/tools installation

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(install): init script gnu sed detection

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-19 13:21:37 +00:00
sylenien
d08280b709 fix(ui): fix text size 2022-05-19 15:11:17 +02:00
sylenien
c82efbeb6b fix(ui): bug fixes for dashboard 2022-05-19 15:11:17 +02:00
sylenien
580641efe8 fix(ui): fix css files 2022-05-19 15:11:17 +02:00
sylenien
bb4aafa1df fix(ui): code review 2022-05-19 15:11:17 +02:00
sylenien
d9a01b3380 feat(ui): move create metric button to the grid 2022-05-19 15:11:17 +02:00
sylenien
69002865d6 fix(ui): remove unnecessary code 2022-05-19 15:11:17 +02:00
sylenien
cde2a6e2d5 fix(ui): fix metric category max height calculation 2022-05-19 15:11:17 +02:00
sylenien
eaf162c5f8 fix(ui): minor metric hover styles fixes 2022-05-19 15:11:17 +02:00
sylenien
e8f7e2e9be feat(ui): make edit metric title hoverable and clickable, create plain text button for future usage 2022-05-19 15:11:17 +02:00
Taha Yassine Kraiem
6df7bbe7d1 feat(api): fixed changed SearchSession payload schema 2022-05-18 20:02:09 +02:00
Taha Yassine Kraiem
4a55d93f52 feat(api): changed SearchSession payload schema 2022-05-18 19:43:18 +02:00
Taha Yassine Kraiem
2544a3e166 feat(api): centralized 'order'
feat(api): transform 'order' casing
2022-05-18 19:08:08 +02:00
ShiKhu
babe654329 Merge branch 'assist-fixes' into dev 2022-05-18 17:55:25 +02:00
ShiKhu
84b99616bd chore(tracker-assist): fix package number string 2022-05-18 17:43:31 +02:00
ShiKhu
8b0ad960e9 Merge branch 'assist-fixes' of github.com:openreplay/openreplay into assist-fixes 2022-05-18 17:29:26 +02:00
ShiKhu
613bed393a fix(player): take into account first message time 2022-05-18 17:29:17 +02:00
Shekar Siri
dce918972f change(ui) - enable annotation on call or remote 2022-05-18 17:27:11 +02:00
ShiKhu
9294748352 fix(frontend-assist): encapsulate toggleAnnotation + fix inverted booleans 2022-05-18 17:17:11 +02:00
ShiKhu
f8bbc16208 fix(frontend-player): apply set_input_value on blur if focused (for the case of remote control) 2022-05-18 16:49:36 +02:00
ShiKhu
b283b89bd2 feat(tracker-assist): annotation available on RemoteControl as well 2022-05-18 16:01:18 +02:00
Shekar Siri
437341257c change(ui) - enable annotation without call 2022-05-18 15:49:58 +02:00
Alex Kaminskii
1f80cb4e64 Merge branch 'small-player-refactoring' into dev 2022-05-18 15:25:34 +02:00
Alex Kaminskii
bd6dba4781 fix(tracker-assist): ConfirmWindow: override default button style & separate defaults 2022-05-18 14:50:56 +02:00
rjshrjndrn
bda652ccab fix(helm): service name
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-18 14:05:08 +02:00
Alex Kaminskii
4c8751944c style(tracker-*): do not store lock files under the npm package dirs 2022-05-18 13:57:38 +02:00
Alexander Zavorotynskiy
a9071b68f2 chore(bash): added heuristics service to local_build.sh 2022-05-18 13:30:21 +02:00
Alexander Zavorotynskiy
8d0d05c2cf fix(backend/heuristics): fixed panic in performanceAggr message encoding 2022-05-18 13:28:00 +02:00
Shekar Siri
ab2a800b7c merged vault (from main) and resolved conflicts 2022-05-18 12:52:26 +02:00
Shekar Siri
9ea1992b34 merged vault (from main) and resolved conflicts 2022-05-18 12:51:26 +02:00
rjshrjndrn
336046a443 chore(helm): common naming convention 2022-05-18 12:39:13 +02:00
Rajesh Rajendran
5041bcb177
GH action with new format (#479)
* chore(actions): update GH Actions to new deployment format

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): yaml indentation

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): image override

helm doesn't support multipart yaml files.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(action): enable docker image cache

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(actions): chalice deployment

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(actions): check previous image prior to deploying

Because we're using an umbrella chart and not storing the image tags deployed from actions anywhere, a new deployment would reset all previously deployed image tags. To avoid that, we have to fetch the existing image tags and feed them into the current deployment.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): static path the build input

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(actions): adding dev branch to chalice deployment

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-17 20:07:02 +00:00
sylenien
631f427f67 fix(ui): fix typo 2022-05-17 18:20:34 +02:00
sylenien
fcd79a6fb7 fix(ui): fix weird scrolling 2022-05-17 18:20:34 +02:00
sylenien
ff02248900 fix(ui): remove additional divider line, fix zindex for menu 2022-05-17 18:20:34 +02:00
sylenien
8e58e68607 fix(ui): fix descr position, fix card click, rm unneeded code 2022-05-17 17:57:03 +02:00
sylenien
07d2c0427d feat(ui): add hovers to metric widgets for dashboard and template comps 2022-05-17 17:57:03 +02:00
sylenien
c1af05fbbe fix(ui): fix metrics table width, fix reload pathing 2022-05-17 17:57:03 +02:00
sylenien
25f792edc2 fix(ui): fix dashboard pinning and state updating; fix menu items naming 2022-05-17 17:57:03 +02:00
sylenien
9960927ca0 fix(ui): fix show more button for metric adding 2022-05-17 17:57:03 +02:00
sylenien
14ef2cba26 fix(ui): fix tooltip behavior on a metric widget 2022-05-17 17:57:03 +02:00
sylenien
30add0fd3c fix(ui): rm consolelog 2022-05-17 17:57:03 +02:00
sylenien
749093d9f6 fix(ui): fix routing in dashboards 2022-05-17 17:57:03 +02:00
rjshrjndrn
d7037771ed chore(helmcharts): adding heuristics service
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-17 12:15:52 +02:00
sylenien
0617e8b485 fix(ui): fix icons generation script to properly trim svg attrs 2022-05-17 11:19:42 +02:00
sylenien
536bacad64 fix(ui): rm conflicting code 2022-05-17 11:19:42 +02:00
sylenien
a3aecae559 fix(ui): fix text on widget updates, remove back link on metrics page and add breadcrumbs 2022-05-17 11:19:42 +02:00
sylenien
33ff7914be fix(ui): remove state updates on unmounted components 2022-05-17 11:19:42 +02:00
sylenien
cba53fa284 fix(ui): fix comments in iconsjs 2022-05-17 11:19:42 +02:00
sylenien
a2c999ccef fix(ui): fix weird wording, bug with svg 2022-05-17 11:19:42 +02:00
sylenien
fec8b9e13c fix(ui): fix clipping bg on hover, fix side menu header 2022-05-17 11:19:42 +02:00
sylenien
8a29f8ecf4 fix(ui): wording, keys warnings 2022-05-17 11:19:42 +02:00
sylenien
bb33ea4714 fix(ui): lettering fixes, move create dashboard to sidebar title 2022-05-17 11:19:42 +02:00
sylenien
5c7f6c1738 fix(ui): fix messages for empty dashboard 2022-05-17 11:19:42 +02:00
rjshrjndrn
f66e780596 chore(ingress): changing proxy body size to 10m
otherwise nginx will reject the request, and AWS will report it as a CORS issue.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-16 21:14:14 +02:00
Alex Kaminskii
8ff0249814 few files to ts 2022-05-16 20:25:15 +02:00
Alex Kaminskii
d495f1aa97 style(player): few renamings 2022-05-16 20:02:28 +02:00
Alex Kaminskii
7929a8ceca refactor(player): move lists to separate file + renaming 2022-05-16 19:55:45 +02:00
Shekar Siri
82ad650f0c feat(ui) - sessions - widget 2022-05-16 19:11:53 +02:00
Alexander Zavorotynskiy
94c56205b9 fix(backend): added error log in kafka producer 2022-05-16 18:56:43 +02:00
Taha Yassine Kraiem
f054b130bf feat(DB): changed metrics category from Overview to Monitoring Essentials 2022-05-16 18:24:16 +02:00
Shekar Siri
acdd3596bc fix(ui) - assist reload remove click event params 2022-05-16 17:05:23 +02:00
Shekar Siri
f1d94c5378 feat(ui) - errors - widget 2022-05-16 17:04:10 +02:00
Shekar Siri
baa6c916dc feat(ui) - funnels - filter dropdowns to select 2022-05-16 16:26:16 +02:00
Alex Kaminskii
76d9d41ed8 refactor(backend/storage): pass FileSplitSize as env var 2022-05-16 15:31:37 +02:00
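
Reading a tunable like FileSplitSize from the environment with a sane default is the standard Go pattern for this kind of change; the variable name and default below are assumptions for illustration:

```go
package main

import (
	"log"
	"os"
	"strconv"
)

// fileSplitSize reads the split threshold from the environment, falling
// back to a default. "FILE_SPLIT_SIZE" and 10 MB are illustrative, not the
// service's real name and value.
func fileSplitSize() int64 {
	const def = 10_000_000 // 10 MB fallback
	v := os.Getenv("FILE_SPLIT_SIZE")
	if v == "" {
		return def
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		log.Printf("bad FILE_SPLIT_SIZE %q, using default: %v", v, err)
		return def
	}
	return n
}

func main() {
	log.Printf("splitting session files at %d bytes", fileSplitSize())
}
```
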
Alex Kaminskii
7d7dcc2910 chore (backend): Dockerfile.bundle update 2022-05-16 15:28:56 +02:00
rjshrjndrn
3b704b9430 fix(helm): nginx forward L7 headers from LB
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-16 15:02:59 +02:00
Alexander Zavorotynskiy
f681e85e50 fix(backend): removed temp Dockerfile from cmd dir 2022-05-16 15:01:12 +02:00
sylenien
90299d9d6d fix(ui): rm consolelog 2022-05-16 14:53:40 +02:00
sylenien
09056c103c feat(ui): moved saved search list to new modal component 2022-05-16 14:53:40 +02:00
sylenien
69b75f5b56 fix(ui): various small ui fixes for buttons 2022-05-16 14:53:40 +02:00
sylenien
e5842939db feat(ui): added success notif for settings updates 2022-05-16 14:53:40 +02:00
sylenien
387e946dfe fix(ui): removed popup from country flag component; added bg to toggler head 2022-05-16 14:53:40 +02:00
sylenien
e1ae8bae20 fix(ui): removed popup from country flag component 2022-05-16 14:53:40 +02:00
sylenien
ac7a70ea62 fix(ui): fixed search bar to properly include sections and filters 2022-05-16 14:53:40 +02:00
Alexander Zavorotynskiy
0028de2d11 fix(backend): removed service dir from Dockerfile 2022-05-16 14:50:32 +02:00
Alex K
22606aca62
Merge pull request #475 from openreplay/integrations_refactoring
Integrations to golang standard file structure
2022-05-16 14:48:00 +02:00
Alex Kaminskii
e26ce2e963 fix(backend-ee/clickhouse): do not insert method & status into resources as they are always unknown 2022-05-16 14:41:44 +02:00
Alexander Zavorotynskiy
3511534cbb feat(backend/integrations): service refactoring 2022-05-16 14:41:12 +02:00
Shekar Siri
97da3f5c1c Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-05-16 14:38:00 +02:00
Shekar Siri
ebbc9cc984 fix(ui) - alert form footer bg 2022-05-16 14:18:52 +02:00
Alex K
d996b14ff8
Merge pull request #474 from openreplay/assets_refactoring
* Assets to golang standard file structure
2022-05-16 14:18:04 +02:00
Alexander Zavorotynskiy
3449440de3 feat(backend/assets): service refactoring 2022-05-16 14:12:37 +02:00
Shekar Siri
d36d4862cf fix(ui) - chart y axis numbers 2022-05-16 14:12:16 +02:00
Alexander
356bf32bfc
Merge pull request #473 from openreplay/storage_refactoring
Storage refactoring
2022-05-16 12:56:22 +02:00
Alexander Zavorotynskiy
24f64af95a feat(backend/storage): service refactoring 2022-05-16 12:52:43 +02:00
rjshrjndrn
4175d98be8 chore(helmcharts): adding clickhouse operator helm chart
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-16 12:52:43 +02:00
rjshrjndrn
c94f4074bb chore(helm): make ingress-nginx installation not mandatory.
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-16 10:48:35 +02:00
Taha Yassine Kraiem
c84d39d38e feat(api): upgraded python base image
feat(alerts): upgraded python base image
2022-05-13 19:15:31 +02:00
Shekar Siri
05bd61b83c feat(ui) - funnels - issues sort 2022-05-13 19:03:01 +02:00
Alexander
a69f3f0e83
Merge pull request #459 from openreplay/ender_refactoring
Ender refactoring
2022-05-13 17:32:14 +02:00
Alexander Zavorotynskiy
44dae11886 feat(backend/db): fixed ee version 2022-05-13 17:00:09 +02:00
Shekar Siri
3baa3ea9a5 Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-05-13 16:05:30 +02:00
Shekar Siri
f6bd3dd0dd feat(ui) - funnels - details wip 2022-05-13 16:05:11 +02:00
sylenien
58397e6c6c fix(ui): remove attrs from icons 2022-05-13 15:56:26 +02:00
sylenien
d72f47b296 fix(ui): fix prop types for sessionitem 2022-05-13 15:56:26 +02:00
sylenien
cea1218613 fix(ui): fix typo in comment 2022-05-13 15:56:26 +02:00
sylenien
e7a31dbb8c fix(ui): refactor sessionitem 2022-05-13 15:56:26 +02:00
sylenien
19178807f8 fix(ui): fixed sessionitem types and removed withrouter connection 2022-05-13 15:56:26 +02:00
sylenien
be13ff5f7a fix(ui): fixed sessionitem and timezone dropdown connection to mobx 2022-05-13 15:56:26 +02:00
sylenien
0d00cf0349 more search field fixes 2022-05-13 15:56:26 +02:00
sylenien
64ebd07e57 added toggler disabled colors, visibility default values, no items warning text to search field 2022-05-13 15:56:26 +02:00
sylenien
1529510d25 removed browser autocomplete from filter inputs, removed timezone picker from main page 2022-05-13 15:56:26 +02:00
sylenien
1f0fb80024 fix category and filters naming, add underline to username hover, fix small bugs 2022-05-13 15:56:26 +02:00
sylenien
7005c046b8 fix ui bugs in session tab 2022-05-13 15:56:26 +02:00
Taha Yassine Kraiem
839f4c0927 feat(api): fixed CH client format 2022-05-13 15:49:17 +02:00
Shekar Siri
fd68f7b576 feat(ui) - funnels - path changes 2022-05-13 13:07:35 +02:00
Shekar Siri
87f42b4a79 feat(ui) - funnels - sub details view 2022-05-13 12:35:55 +02:00
Shekar Siri
95f0649ccb Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-05-13 11:27:53 +02:00
Shekar Siri
923fce97fb change(ui) - validation based on ee 2022-05-13 11:26:36 +02:00
Shekar Siri
8c7cbbb189 Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-05-13 11:22:53 +02:00
Shekar Siri
34947d8ef7 change(ui) - validation based on ee 2022-05-13 11:19:23 +02:00
Shekar Siri
a88763d0eb feat(ui) - funnels - issues list 2022-05-13 11:13:55 +02:00
Alexander
4ac3da241e
Merge branch 'dev' into ender_refactoring 2022-05-12 17:16:45 +02:00
Taha Yassine Kraiem
ac4e32aba3 feat(DB): changed partition expression 2022-05-12 16:24:58 +02:00
Shekar Siri
6a1e72e1d5 feat(ui) - funnels - issues list 2022-05-12 15:15:56 +02:00
Alex K
4f1a686787
Merge pull request #453 from openreplay/sink_refactor
Sink refactor

* structure -> go standards
* move URLrewrite to sink (free http from encoding-decoding)
2022-05-12 15:03:32 +02:00
Shekar Siri
8584cf74cb feat(ui) - funnels - tailwind config 2022-05-12 14:32:04 +02:00
Shekar Siri
f40403f4e9 feat(ui) - funnels - issues filters 2022-05-12 14:31:44 +02:00
Shekar Siri
8e1bb95c84 feat(ui) - funnels - issues filters 2022-05-12 12:55:34 +02:00
Alexander Zavorotynskiy
ae6af1449c feat(backend-db/heuristics): fixed errors in main files 2022-05-12 09:59:09 +02:00
ShiKhu
883f7eab8a fix(tracker-assist): 3.5.9: enforce peerjs@1.3.2 2022-05-11 23:53:19 +02:00
Alex Kaminskii
88bec7ab60 refactor(): separate ieBuilder, peBuilder & networkIssueDeterctor from EventMapper 2022-05-11 21:27:18 +02:00
Alex Kaminskii
6d2bfc0e77 fix(backend/internals): builder codefix 2022-05-11 21:25:41 +02:00
Alex Kaminskii
85b87e17df refactor(backend/internals): builder: message order & timestamps check 2022-05-11 21:14:23 +02:00
Alex Kaminskii
a6f8857b89 refactor-fix(backend-heuristics/db): create handlers for each session separately 2022-05-11 19:04:14 +02:00
Alex Kaminskii
e65fa58ab5 refactor(backend-internal): dry builder 2022-05-11 18:51:55 +02:00
Alex Kaminskii
17d477fc43 fix+style(tracker):3.5.11 fix build & files structure 2022-05-11 18:27:18 +02:00
Alex Kaminskii
396f1a16af refactor(backend-sink): producer close timeout value to config 2022-05-11 17:36:35 +02:00
Shekar Siri
a8fbf50a49 feat(ui) - funnels - issues sort 2022-05-11 17:12:33 +02:00
Alexander Zavorotynskiy
c77966a789 feat(backend/handlers): removed unix timestamp from header builders 2022-05-11 16:45:31 +02:00
Alex Kaminskii
ebc0185806 style(backend-http): split core and local imports 2022-05-11 16:37:49 +02:00
Alex Kaminskii
6456520587 style(backend-http): use UnixMilli 2022-05-11 16:36:31 +02:00
Alex Kaminskii
a241830e71 refactor(backend-sink/http): move URLrewriter to sink 2022-05-11 16:32:27 +02:00
Alex Kaminskii
ea2d13dac6 chore(backend-sink): sink in cmd 2022-05-11 16:27:01 +02:00
Shekar Siri
467e99d90d merge dev changes 2022-05-11 16:16:59 +02:00
Shekar Siri
f5d154bfc2 npm updates 2022-05-11 16:13:26 +02:00
Shekar Siri
bec68eb375 feat(ui) - funnels - issues 2022-05-11 16:13:01 +02:00
Shekar Siri
34425b8b02 feat(ui) - funnels - check for table and funnel 2022-05-10 19:25:08 +02:00
Shekar Siri
9ecb4c369e feat(ui) - funnels - step percentage dynamic 2022-05-10 18:03:19 +02:00
Shekar Siri
0174e265e0 feat(ui) - funnels - step percentage 2022-05-10 17:50:50 +02:00
Shekar Siri
d619083a85 feat(ui) - funnels - step toggle 2022-05-10 17:37:27 +02:00
Shekar Siri
3bb5d9fabd feat(ui) - funnels - graph 2022-05-10 17:17:15 +02:00
Taha Yassine Kraiem
efec096ffe feat(api): fixed sourcemaps reader endpoint 2022-05-10 17:13:19 +02:00
Shekar Siri
5f64bc90dc
Merge pull request #452 from openreplay/audit
Audit Trails
2022-05-10 17:08:21 +02:00
Alexander Zavorotynskiy
26e23d594f feat(backend/handlers): refactored web and ios message handlers 2022-05-10 15:40:55 +02:00
Alexander Zavorotynskiy
47007eb9d7 feat(backend/db): prepared db service for refactoring 2022-05-10 14:11:41 +02:00
Shekar Siri
89db14bdbf feat(ui) - funnels - merged dev 2022-05-10 12:10:18 +02:00
Shekar Siri
eae31eac37 feat(ui) - audit - date 2022-05-09 19:34:59 +02:00
Shekar Siri
5b627c17ec feat(ui) - audit - daterange with new component 2022-05-09 19:02:07 +02:00
Alexander Zavorotynskiy
ca9d76624b feat(backend/heuristics): message handlers refactoring 2022-05-09 16:51:10 +02:00
Taha Yassine Kraiem
d3be02fd9d feat(api): user trail limit changed 2022-05-09 15:30:28 +02:00
Alex Kaminskii
ae4c6e5cad refactor(backend-sink): go standards 2022-05-07 23:52:48 +02:00
Alex Kaminskii
324ee0890e chore(backend): enforce amd64 build (for build on arm mac) 2022-05-07 23:21:30 +02:00
Alex Kaminskii
71d50e5a44 refactor(backend-messages):predefined TypeID() on message type 2022-05-07 23:19:49 +02:00
Alex Kaminskii
e4d45e88f9 chore(backend): name entrypoint container 2022-05-07 23:00:00 +02:00
Alex Kaminskii
6ab6d342c0 chore(backend-heuristics/db): remove redundant 2022-05-07 22:16:15 +02:00
Alex Kaminskii
62b36bd70a refactor(backend-heuristics): bring all sub-builders to a common interface 2022-05-07 21:29:40 +02:00
Alex Kaminskii
432c0da4e2 chore(backend-heuristics): Remove redundant lines 2022-05-07 15:10:46 +02:00
Shekar Siri
b97c32ad56 feat(ui) - audit - filters 2022-05-06 18:54:25 +02:00
Taha Yassine Kraiem
7625eb9f8c feat(alerts): changed Dockerfile.alerts 2022-05-06 18:36:46 +02:00
Taha Yassine Kraiem
202bf73456 feat(api): vault support 2022-05-06 18:30:59 +02:00
Taha Yassine Kraiem
516e5b0446 feat(api): changed search user trails by username 2022-05-06 17:43:55 +02:00
Shekar Siri
7feaa376e6 feat(ui) - audit - list and search 2022-05-06 17:31:35 +02:00
Taha Yassine Kraiem
d8078c220d feat(api): search user trails by username
feat(db): index to search user trails by username
2022-05-06 17:27:43 +02:00
Alexander Zavorotynskiy
8c432b8ba3 Removed extra logic from heuristics 2022-05-06 16:39:29 +02:00
Alexander Zavorotynskiy
967034a89c Create first version of heuristics service with the same logic as old ender 2022-05-06 16:12:06 +02:00
Taha Yassine Kraiem
ec445f88c7 feat(api): EE updated authorizer 2022-05-06 15:09:50 +02:00
Alexander Zavorotynskiy
2b3728d8da Finished refactoring for session ender service 2022-05-06 12:21:43 +02:00
Taha Yassine Kraiem
0c84c89b4f feat(api): changed Dockerfile 2022-05-06 12:16:07 +02:00
Taha Yassine Kraiem
50b476316a feat(api): changed root path 2022-05-06 12:11:38 +02:00
Taha Yassine Kraiem
ac9c10393f feat(api): fixed return createdAt with the list of users 2022-05-06 12:07:03 +02:00
Shekar Siri
f12931491a feat(ui) - audit - base views 2022-05-06 12:06:55 +02:00
Taha Yassine Kraiem
ef0edebb3d feat(DB): traces/trails index
feat(api): get all possible traces/trails actions
feat(api): search traces/trails by actions
feat(api): search traces/trails by user
2022-05-06 11:56:03 +02:00
Alex Kaminskii
a99f684b83 feat(frontend-player): sequential (pre)load for multifile sessions 2022-05-06 00:10:08 +02:00
Alex Kaminskii
2d96705930 readme(tracker): build-readme for js packages 2022-05-06 00:07:07 +02:00
Taha Yassine Kraiem
21d8d28a79 feat(api): return createdAt with the list of users 2022-05-05 20:42:08 +02:00
Taha Yassine Kraiem
acaef59590 feat(DB): traces/trails index
feat(api): get all traces/trails
2022-05-05 20:37:37 +02:00
Taha Yassine Kraiem
172508dcf3 feat(DB): changed sessions_metadata sort expression 2022-05-05 18:21:47 +02:00
Alexander Zavorotynskiy
f4212d6eaa Split ender into 2 services (ender and heuristics) 2022-05-05 17:37:05 +02:00
Shekar Siri
bd07d42084 Merge branch 'user-list' into dev 2022-05-05 17:07:36 +02:00
Shekar Siri
b77771ccca change(ui) - user list checking for enterprise 2022-05-05 17:07:16 +02:00
Shekar Siri
17aec98298
Merge pull request #447 from openreplay/user-list
UI Improvements - User, Projects
2022-05-05 16:32:20 +02:00
Shekar Siri
bb1afdc76e fix(ui) - errors viewed state 2022-05-05 16:29:55 +02:00
Alexander Zavorotynskiy
700ef0dcc6 Made standard project layout for ender service 2022-05-05 15:26:10 +02:00
rjshrjndrn
b843aba08a chore(init): create directory if it does not exist
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-05 15:03:31 +02:00
Shekar Siri
59fe8245dd change(ui) - user list tooltip 2022-05-05 14:41:10 +02:00
Shekar Siri
c4b371507d change(ui) - project delete moved to modal 2022-05-05 14:31:53 +02:00
Shekar Siri
c3bb5aeb07 change(ui) - sites search 2022-05-05 13:27:06 +02:00
Shekar Siri
55b64128f1 change(ui) - sites checking for exists 2022-05-05 13:16:06 +02:00
Shekar Siri
dfce25709a change(ui) - user limit check and other fixes 2022-05-05 13:11:20 +02:00
Alex K
50bbd0fe98
Merge pull request #445 from openreplay/db_refactoring
Db refactoring
2022-05-05 12:50:40 +02:00
Alex Kaminskii
b6d57b45ab chore(github-workflow): backend 2022-05-05 12:49:44 +02:00
Alexander Zavorotynskiy
88306e1a6a fix (backend): removed unused import in storage module 2022-05-05 12:04:23 +02:00
Alexander Zavorotynskiy
74756b2409 Refactoring of the db service 2022-05-05 10:46:48 +02:00
Alexander Zavorotynskiy
c050394116 Moved service configs to config module 2022-05-05 10:23:36 +02:00
Shekar Siri
918f7e9d86 change(ui) - user delete 2022-05-05 10:09:16 +02:00
Alexander Zavorotynskiy
167d1e117e Made correct project layout 2022-05-05 09:45:38 +02:00
Alex Kaminskii
6314fcbbef feat(backend): 2 files back compatible format 2022-05-04 20:33:52 +02:00
Shekar Siri
330992736d change(ui) - user form role filter 2022-05-04 19:35:04 +02:00
Shekar Siri
7e655d513c change(ui) - userlist form 2022-05-04 18:53:43 +02:00
Shekar Siri
5ef382c9b8 Merge branch 'dev' of github.com:openreplay/openreplay into user-list 2022-05-04 16:42:45 +02:00
Shekar Siri
c15648eaf7 change(ui) - tailwind justify-self 2022-05-04 16:41:44 +02:00
Shekar Siri
c97fe55cda change(ui) - users list - form 2022-05-04 16:41:29 +02:00
Alexander Zavorotynskiy
5b7c479f4d Refactoring in stats logger 2022-05-04 16:17:57 +02:00
Taha Yassine Kraiem
42f3b6d018 feat(api): changed Dockerfile 2022-05-04 14:50:09 +02:00
Taha Yassine Kraiem
8d5cf84d90 feat(api): changed Dockerfile 2022-05-04 14:36:52 +02:00
Alexander Zavorotynskiy
74672d4321 Removed unused code 2022-05-04 14:36:42 +02:00
Taha Yassine Kraiem
47be240dfb feat(api): changed Dockerfile 2022-05-04 14:32:17 +02:00
Alexander Zavorotynskiy
9cdb1e8ab7 Removed global pg connection 2022-05-04 14:21:15 +02:00
Taha Yassine Kraiem
36b466665c feat(api): changed replay file URL 2022-05-04 13:14:25 +02:00
Shekar Siri
424b071eaf change(ui) - users list - search and pagination 2022-05-04 13:14:20 +02:00
Taha Yassine Kraiem
f90a25c75a feat(api): EE updated dependencies 2022-05-04 13:10:48 +02:00
Taha Yassine Kraiem
144e58adef feat(api): updated dependencies 2022-05-04 13:00:40 +02:00
Shekar Siri
7d08e32d25 change(ui) - users list 2022-05-04 12:27:44 +02:00
Alexander Zavorotynskiy
a4278aec23 [http] removed extra log in main.go
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-04 12:17:33 +02:00
rjshrjndrn
767fa31026 chore(actions): include cmd dir for build
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-04 12:09:46 +02:00
rjshrjndrn
b72a332cd0 chore(build): returning from function
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-04 11:38:23 +02:00
Alex Kaminskii
82084c9717 fix (backend): build.sh: encapsulate build_service 2022-05-04 11:23:38 +02:00
Alexander
15563ca582
Merge pull request #442 from openreplay/http_refactoring
Http service refactoring
2022-05-04 10:10:07 +02:00
rjshrjndrn
42e6a63e44 docs(vagrant): create user account comment
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-05-03 21:43:46 +02:00
Alexander Zavorotynskiy
414fbee962 Fixed build.sh file 2022-05-03 13:55:56 +02:00
Shekar Siri
0bbd27e856
Merge pull request #441 from openreplay/session-settings
Session settings
2022-05-03 12:50:54 +02:00
Shekar Siri
690577407d feat(ui) - session settings - cleanup 2022-05-03 12:34:58 +02:00
Alexander Zavorotynskiy
b2456e9ac6 Removed debug lines from build.sh 2022-05-03 12:33:43 +02:00
Shekar Siri
18e932e5e9 feat(ui) - session settings - capture rate api update 2022-05-03 12:26:42 +02:00
Alexander Zavorotynskiy
18d18164b3 Added temporary hack for http service building 2022-05-03 10:42:24 +02:00
Alexander Zavorotynskiy
d02ecba354 Added missed return statements 2022-05-02 17:38:53 +02:00
Alexander Zavorotynskiy
5ec46ad753 Moved assets cache logic 2022-05-02 17:36:33 +02:00
Shekar Siri
87f76f484d feat(ui) - session settings - changed state 2022-05-02 16:31:19 +02:00
Shekar Siri
d2f168f667 remote pull dev 2022-05-02 16:27:53 +02:00
Shekar Siri
02c39199d2 feat(ui) - session settings - changed state 2022-05-02 16:26:05 +02:00
Shekar Siri
e421511db8 feat(ui) - session settings - libs 2022-05-02 16:07:12 +02:00
Shekar Siri
a1b656dc6a feat(ui) - session settings - ui and state 2022-05-02 16:07:00 +02:00
Alexander Zavorotynskiy
69cabaecfe Moved the rest of the code to separate dirs 2022-05-02 15:28:51 +02:00
Alexander Zavorotynskiy
df722761e5 Moved server to a separate dir 2022-05-02 15:20:10 +02:00
Alexander Zavorotynskiy
c347198fc1 Moved http handlers to a separate dir 2022-05-02 15:05:45 +02:00
Alexander Zavorotynskiy
f01ef3ea03 Made a correct project structure for http service 2022-05-02 14:47:13 +02:00
Alexander Zavorotynskiy
66e190221d Removed global objects (moved service initialization into serviceBuilder) 2022-05-02 14:36:02 +02:00
Taha Yassine Kraiem
b87e601f27 chore(vagrant): Changed development.md
chore(vagrant): Added dev setup-scripts for EE
2022-05-02 11:33:39 +02:00
Rajesh Rajendran
867f92dfc7 Update development.md 2022-04-30 18:07:45 +02:00
Taha Yassine Kraiem
6807dc8ce1 feat(api): EE optimized get error details 2022-04-29 18:52:29 +02:00
Alexander Zavorotynskiy
b0bb5bd922 Moved configuration to the separate file 2022-04-29 17:23:20 +02:00
Alexander Zavorotynskiy
10edeb6e2d Refactoring of http handlers 2022-04-29 16:53:28 +02:00
Shekar Siri
27641279b4
Update dashboard.ts 2022-04-29 16:10:03 +02:00
Taha Yassine Kraiem
423f416015 feat(api): fixed description optional value 2022-04-29 16:08:38 +02:00
Shekar Siri
4f1a476c65
Update dashboard.ts 2022-04-29 16:02:14 +02:00
Shekar Siri
6a855a947c
Merge pull request #435 from openreplay/reporting
Dashboard - Report Generation
2022-04-29 15:36:06 +02:00
Shekar Siri
8986f395b1 feat(ui) - dashboard - new libs 2022-04-29 14:27:23 +02:00
Taha Yassine Kraiem
84a43bcd8b feat(api): fixed description default value 2022-04-29 14:16:36 +02:00
Shekar Siri
7c2539ec93 feat(ui) - dashboard - report 2022-04-29 14:16:29 +02:00
Taha Yassine Kraiem
fff8f75fd0 feat(api): changed Dockerfile 2022-04-29 14:06:06 +02:00
Taha Yassine Kraiem
63e897594f feat(db): EE fixed widget-size for upgrade 2022-04-29 14:06:06 +02:00
rjshrjndrn
31f9e49673 chore(vagrant): Adding development readme
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-29 14:06:06 +02:00
ShiKhu
6412c2a862 fix(backend/storage): codefix 2022-04-29 14:06:06 +02:00
ShiKhu
1e5deed0d5 feat(backend/storage):split files into 2 2022-04-29 14:06:06 +02:00
Alexander Zavorotynskiy
0bbf8012f1 fix(backend): added missed return in error case 2022-04-29 14:06:06 +02:00
Alexander Zavorotynskiy
9856e36f44 fix(backend): fixed possible panic in the defer 2022-04-29 14:06:06 +02:00
ShiKhu
d699341676 fix(backend): Dockerfile.bundle fix 2022-04-29 14:06:06 +02:00
ShiKhu
fbb039f0c7 fix(backend):pprof launch addr: use port only 2022-04-29 14:06:06 +02:00
ShiKhu
1b93f8a453 gofmt 2022-04-29 14:06:06 +02:00
rjshrjndrn
bdb6a75d7c fix(nginx): proper x-forward-for proxying
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-29 14:06:06 +02:00
Rajesh Rajendran
4f44edeb39 Vagrant for local contribution (#434)
* chore(vagrant): initial vagrantfile
* chore(vagrant): adding instructions after installation
* chore(vagrant): Adding vagrant user to docker group
* chore(vagrant): use local docker daemon for k3s
* chore(vagrant): fix comment
* chore(vagrant): adding hostname in /etc/hosts
* chore(vagrant): fix doc
* chore(vagrant): limiting cpu
* chore(frontend): initialize dev env
* chore(docker): adding dockerignore
* chore(dockerfile): using cache for faster builds
* chore(dockerignore): update
* chore(docker): build optimizations
* chore(build): all components build option
* chore(build): utilities build fix
* chore(script): remove debug message
* chore(vagrant): provision using stable branch always

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-29 14:06:06 +02:00
Taha Yassine Kraiem
8fa4632ee4 feat(alerts): changed build script 2022-04-29 14:06:06 +02:00
Shekar Siri
59f51cde26 feat(ui) - dashboard - report 2022-04-29 13:56:20 +02:00
Taha Yassine Kraiem
35b9d6ebaf feat(api): s3 helper detect environment
feat(api): support description for dashboards
2022-04-29 13:40:57 +02:00
Shekar Siri
a87717ba8c feat(ui) - dashboard - report 2022-04-29 13:37:30 +02:00
Taha Yassine Kraiem
122705b4c7 feat(db): EE fixed widget-size for upgrade 2022-04-29 13:19:11 +02:00
Shekar Siri
878c742c2f feat(ui) - dashboard - report 2022-04-29 12:32:34 +02:00
rjshrjndrn
89ba052d41 chore(vagrant): Adding development readme
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-29 12:17:01 +02:00
Alexander Zavorotynskiy
dc69131499 Deleted commented (unused) code 2022-04-29 11:22:00 +02:00
Shekar Siri
b096ac73d1 feat(ui) - dashboard - report 2022-04-29 10:02:56 +02:00
ShiKhu
cb01c3cb28 fix(backend/storage): codefix 2022-04-28 19:21:45 +02:00
ShiKhu
6d4800feea feat(backend/storage):split files into 2 2022-04-28 19:14:23 +02:00
Alexander Zavorotynskiy
de3ba9c7f6 fix(backend): added missed return in error case 2022-04-28 18:02:56 +02:00
Alexander Zavorotynskiy
3132db6205 fix(backend): fixed possible panic in the defer 2022-04-28 17:55:56 +02:00
ShiKhu
c2d1bcdb35 Merge branch 'backend' into dev 2022-04-28 17:03:25 +02:00
ShiKhu
60d0d42d69 fix(backend): Dockerfile.bundle fix 2022-04-28 17:02:53 +02:00
ShiKhu
d64cd12eb6 fix(backend):pprof launch addr: use port only 2022-04-28 17:02:13 +02:00
Taha Yassine Kraiem
1a73b978dc feat(db): EE remove pages_count column 2022-04-28 15:29:45 +02:00
Taha Yassine Kraiem
b8367d87f8 feat(api): EE fixed No of pages count widget 2022-04-28 14:59:22 +02:00
Taha Yassine Kraiem
aef7026034 feat(api): EE fixed No of pages count widget 2022-04-28 14:59:05 +02:00
Taha Yassine Kraiem
51c75657ab feat(api): EE fixed No of pages count widget 2022-04-28 14:08:23 +02:00
Taha Yassine Kraiem
f8f70b1006 feat(api): EE fixed No of pages count widget 2022-04-28 14:07:28 +02:00
rjshrjndrn
94adb69f6b fix(nginx): proper x-forward-for proxying
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-27 15:00:54 +02:00
Rajesh Rajendran
f3b6bda163
Vagrant for local contribution (#434)
* chore(vagrant): initial vagrantfile
* chore(vagrant): adding instructions after installation
* chore(vagrant): Adding vagrant user to docker group
* chore(vagrant): use local docker daemon for k3s
* chore(vagrant): fix comment
* chore(vagrant): adding hostname in /etc/hosts
* chore(vagrant): fix doc
* chore(vagrant): limiting cpu
* chore(frontend): initialize dev env
* chore(docker): adding dockerignore
* chore(dockerfile): using cache for faster builds
* chore(dockerignore): update
* chore(docker): build optimizations
* chore(build): all components build option
* chore(build): utilities build fix
* chore(script): remove debug message
* chore(vagrant): provision using stable branch always

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2022-04-27 12:54:40 +00:00
Taha Yassine Kraiem
72bee8e894 feat(api): round time metrics 2022-04-26 18:10:25 +02:00
Taha Yassine Kraiem
55b504cc22 feat(alerts): changed build script 2022-04-26 16:30:48 +02:00
Taha Yassine Kraiem
f57bf7205c feat(assist): EE fixed geoip-unknown ip 2022-04-26 12:47:18 +02:00
Taha Yassine Kraiem
1832567beb feat(assist): fixed geoip-unknown ip 2022-04-26 12:44:07 +02:00
ShiKhu
43669c082c gofmt 2022-04-25 23:09:52 +02:00
Shekar Siri
53ac4c3321 Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-04-25 12:07:19 +02:00
Shekar Siri
fb44ff70fe feat(ui) - funnels wip 2022-04-22 19:07:01 +02:00
Shekar Siri
eeebe11915 Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-04-22 16:10:44 +02:00
Shekar Siri
4907c1b26c feat(ui) - funnels listing 2022-04-22 14:47:38 +02:00
Shekar Siri
a287a9ca47 Merge branch 'dev' of github.com:openreplay/openreplay into funnels 2022-04-22 12:47:03 +02:00
Shekar Siri
3882128d4a feat(ui) - funnels - wip 2022-04-21 16:52:01 +02:00
Shekar Siri
45e39c8749 feat(ui) - funnels - wip 2022-04-20 18:05:10 +02:00
1648 changed files with 43369 additions and 71524 deletions

View file

@@ -1,10 +1,12 @@
# This action will push the chalice changes to aws
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- ee/api/**
- api/**
name: Build and Deploy Chalice EE
@@ -31,27 +33,64 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
IMAGE_TAG: ${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
cd api
PUSH_IMAGE=1 bash build.sh ee
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee suffix, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app chalice
cd scripts/helmcharts/
## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
# We're not passing the -ee flag, because helm will add it.
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
@@ -59,6 +98,6 @@ jobs:
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ee-${{ github.sha }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
#
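
The "Creating old image input" step above snapshots the image tags currently running in the cluster, so that deploying one rebuilt service does not reset the untouched services to a stale tag. A minimal Python sketch of the same transformation (service names and tags below are illustrative, not taken from the workflow):

# Given "name:tag" lines extracted from the running pods, emit a helm
# values override that pins every service to its currently deployed tag.
def build_image_override(image_lines):
    out = []
    for line in image_lines:
        name, _, tag = line.partition(":")
        # strip the -ee suffix, since the helm chart appends it again
        out.append(f"{name}:\n  image:\n    tag: {tag.split('-')[0]}\n")
    return "".join(out)

print(build_image_override(["chalice:1f0fb80-ee", "http:7005c04-ee"]))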

View file

@@ -3,6 +3,7 @@ on:
workflow_dispatch:
push:
branches:
- dev
- api-v1.5.5
paths:
- api/**
@@ -32,6 +33,12 @@ jobs:
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing api image
id: build-image
env:
@@ -41,15 +48,43 @@ jobs:
run: |
cd api
PUSH_IMAGE=1 bash build.sh
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app chalice
cd scripts/helmcharts/
## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}

View file

@@ -1,5 +1,6 @@
# This action will push the utilities changes to aws
on:
workflow_dispatch:
push:
branches:
- dev

View file

@@ -1,11 +1,13 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- ee/backend/**
- backend/**
name: Build and deploy workers EE
@@ -33,11 +35,16 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Build, tag, and Deploy to k8s
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Build, tag
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
IMAGE_TAG: ${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
#
@@ -47,35 +54,79 @@ jobs:
#
# Getting the images to build
#
git diff --name-only HEAD HEAD~1 | grep backend/services | cut -d '/' -f3 | uniq > backend/images_to_build.txt
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
set -x
{
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > /tmp/images_to_build.txt
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
#
# Pushing image to registry
#
cd backend
for image in $(cat images_to_build.txt);
for image in $(cat /tmp/images_to_build.txt);
do
echo "Building $image"
PUSH_IMAGE=1 bash -x ./build.sh ee $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done
- name: Creating old image input
env:
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee suffix, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploying to kubernetes
env:
# We're not passing the -ee flag, because helm will add it.
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Deploying image to environment.
#
cd ../scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#jwt_secret_key.*#jwt_secret_key: \"${{ secrets.EE_JWT_SECRET }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
for image in $(cat ../../backend/images_to_build.txt);
cd scripts/helmcharts/
## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
## Update images
for image in $(cat /tmp/images_to_build.txt);
do
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
# Deploy command
bash openreplay-cli --install $image
sed -i "/${image}/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
done
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
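
The image-selection logic in the "Build, tag" step deserves a closer look: a service is rebuilt when its own sources under backend/cmd or backend/services changed, or when a shared package under backend/pkg or backend/internal that it uses changed. A rough Python equivalent (the pkg_users map stands in for the `grep -rl "pkg/$pkg_name" backend/services backend/cmd` scan; all names are illustrative):

def services_to_build(changed_paths, pkg_users):
    to_build = set()
    for path in changed_paths:
        if path.startswith("ee/"):
            continue  # mirrors grep -vE ^ee/
        parts = path.split("/")
        if len(parts) < 3 or parts[0] != "backend":
            continue
        if parts[1] in ("services", "cmd"):
            to_build.add(parts[2])  # service sources changed directly
        elif parts[1] in ("pkg", "internal"):
            to_build.update(pkg_users.get(parts[2], []))  # dependents
    return sorted(to_build)

print(services_to_build(
    ["backend/services/sink/main.go", "backend/pkg/queue/consumer.go"],
    {"queue": ["sink", "ender"]}))  # -> ['ender', 'sink']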

View file

@@ -1,6 +1,7 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
push:
branches:
- dev
@@ -33,7 +34,12 @@ jobs:
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Build, tag, and Deploy to k8s
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Build, tag
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
@@ -47,42 +53,73 @@ jobs:
#
# Getting the images to build
#
set -x
{
git diff --name-only HEAD HEAD~1 | grep backend/services | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep backend/pkg | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > backend/images_to_build.txt
} | uniq > /tmp/images_to_build.txt
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
#
# Pushing image to registry
#
cd backend
for image in $(cat images_to_build.txt);
for image in $(cat /tmp/images_to_build.txt);
do
echo "Building $image"
PUSH_IMAGE=1 bash -x ./build.sh skip $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done
- name: Creating old image input
env:
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploying to kubernetes
env:
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Deploying image to environment.
#
cd ../scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
for image in $(cat ../../backend/images_to_build.txt);
cd scripts/helmcharts/
## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
## Update images
for image in $(cat /tmp/images_to_build.txt);
do
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
# Deploy command
bash kube-install.sh --app $image
sed -i "/${image}/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
done
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3

api/.dockerignore (new file, +6)
View file

@@ -0,0 +1,6 @@
# ignore .git and .cache folders
.git
.cache
**/build.sh
**/build_*.sh
**/*deploy.sh

View file

@@ -36,7 +36,8 @@ pg_password=asayerPostgres
pg_port=5432
pg_user=postgres
pg_timeout=30
pg_minconn=45
pg_minconn=20
pg_maxconn=50
PG_RETRY_MAX=50
PG_RETRY_INTERVAL=2
put_S3_TTL=20
@@ -44,6 +45,6 @@ sentryURL=
sessions_bucket=mobs
sessions_region=us-east-1
sourcemaps_bucket=sourcemaps
sourcemaps_reader=http://127.0.0.1:9000/
sourcemaps_reader=http://127.0.0.1:9000/sourcemaps
stage=default-foss
version_number=1.4.0
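
The new pg_minconn/pg_maxconn pair bounds the Postgres connection pool from both sides instead of pinning a single minimum. The chalicelib pg_client itself is not part of this diff, so the following is only a sketch of how such settings are typically consumed with psycopg2, not the project's actual implementation:

import os
from psycopg2 import pool

pg_pool = pool.ThreadedConnectionPool(
    minconn=int(os.environ.get("pg_minconn", 20)),
    maxconn=int(os.environ.get("pg_maxconn", 50)),
    host=os.environ.get("pg_host", "localhost"),
    port=int(os.environ.get("pg_port", 5432)),
    user=os.environ.get("pg_user", "postgres"),
    password=os.environ.get("pg_password", ""),
)
conn = pg_pool.getconn()
try:
    pass  # run queries here
finally:
    pg_pool.putconn(conn)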

View file

@@ -1,20 +1,7 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env
ENV APP_NAME chalice
# Installing Nodejs
RUN apt update && apt install -y curl && \
curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \
apt install -y nodejs && \
apt remove --purge -y curl && \
rm -rf /var/lib/apt/lists/* && \
cd sourcemap-reader && \
npm install
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
@@ -22,5 +9,23 @@ ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
# Installing Nodejs
RUN apt update && apt install -y curl && \
curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \
apt install -y nodejs && \
apt remove --purge -y curl && \
rm -rf /var/lib/apt/lists/*
WORKDIR /work_tmp
COPY requirements.txt /work_tmp/requirements.txt
RUN pip install -r /work_tmp/requirements.txt
COPY sourcemap-reader/*.json /work_tmp/
RUN cd /work_tmp && npm install
WORKDIR /work
COPY . .
RUN mv .env.default .env && mv /work_tmp/node_modules sourcemap-reader/.
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh
CMD ./entrypoint.sh

View file

@@ -1,13 +1,9 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
ENV pg_minconn 2
ENV APP_NAME alerts
ENV pg_minconn 2
ENV pg_maxconn 10
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
@@ -15,5 +11,13 @@ ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
COPY requirements.txt /work_tmp/requirements.txt
RUN pip install -r /work_tmp/requirements.txt
WORKDIR /work
COPY . .
RUN mv .env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh

View file

@@ -1,4 +1,4 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
WORKDIR /work
COPY . .

View file

@@ -19,10 +19,14 @@ class JWTAuth(HTTPBearer):
if not credentials.scheme == "Bearer":
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authentication scheme.")
jwt_payload = authorizers.jwt_authorizer(credentials.scheme + " " + credentials.credentials)
auth_exists = jwt_payload is not None \
and users.auth_exists(user_id=jwt_payload.get("userId", -1),
tenant_id=jwt_payload.get("tenantId", -1),
jwt_iat=jwt_payload.get("iat", 100),
jwt_aud=jwt_payload.get("aud", ""))
if jwt_payload is None \
or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
or not users.auth_exists(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"],
jwt_iat=jwt_payload["iat"], jwt_aud=jwt_payload["aud"]):
or not auth_exists:
print("JWTAuth: Token issue")
if jwt_payload is not None:
print(jwt_payload)
@@ -34,21 +38,19 @@ class JWTAuth(HTTPBearer):
print("JWTAuth: iat is None")
if jwt_payload is not None and jwt_payload.get("aud") is None:
print("JWTAuth: aud is None")
if jwt_payload is not None and \
not users.auth_exists(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"],
jwt_iat=jwt_payload["iat"], jwt_aud=jwt_payload["aud"]):
if jwt_payload is not None and not auth_exists:
print("JWTAuth: not users.auth_exists")
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")
user = users.get(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"])
user = users.get(user_id=jwt_payload.get("userId", -1), tenant_id=jwt_payload.get("tenantId", -1))
if user is None:
print("JWTAuth: User not found.")
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User not found.")
jwt_payload["authorizer_identity"] = "jwt"
print(jwt_payload)
request.state.authorizer_identity = "jwt"
request.state.currentContext = CurrentContext(tenant_id=jwt_payload["tenantId"],
user_id=jwt_payload["userId"],
request.state.currentContext = CurrentContext(tenant_id=jwt_payload.get("tenantId", -1),
user_id=jwt_payload.get("userId", -1),
email=user["email"])
return request.state.currentContext
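
The JWTAuth change above evaluates users.auth_exists once, stores it in an auth_exists flag, and reuses it in both the rejection branch and the debug logging. Condensed into a standalone sketch (users_auth_exists stands in for users.auth_exists, and the exception type is simplified):

def check_token(jwt_payload, users_auth_exists):
    # reject when the payload is missing, incomplete, or unknown to the DB
    auth_exists = jwt_payload is not None and users_auth_exists(
        user_id=jwt_payload.get("userId", -1),
        tenant_id=jwt_payload.get("tenantId", -1),
        jwt_iat=jwt_payload.get("iat", 100),
        jwt_aud=jwt_payload.get("aud", ""))
    if (jwt_payload is None or jwt_payload.get("iat") is None
            or jwt_payload.get("aud") is None or not auth_exists):
        raise PermissionError("Invalid token or expired token.")
    return jwt_payload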

View file

@@ -12,9 +12,9 @@ envarg="default-foss"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit=1
exit 1
}
[[ exit -eq 1 ]] && exit 1
return
}
function build_api(){
@@ -32,9 +32,11 @@ function build_api(){
docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
}
}
echo "api docker build completed"
}
check_prereq
build_api $1
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1
echo build_complete
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1

View file

@@ -27,7 +27,7 @@ function make_submodule() {
mkdir -p ./alerts/chalicelib/
cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
mkdir -p ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,dashboard,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,metrics,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
mkdir -p ./alerts/chalicelib/utils/
cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,SAML2_helper,email_helper,email_handler,smtp,s3,args_transformer,ch_client,metrics_helper}.py ./alerts/chalicelib/utils/
# -- end of generated part
@@ -64,7 +64,8 @@ function build_api(){
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
}
echo "completed alerts build"
}
check_prereq
build_api $1
build_api $1

View file

@@ -99,10 +99,10 @@ def Build(a):
j_s = True
if a["seriesId"] is not None:
a["filter"]["sort"] = "session_id"
a["filter"]["order"] = "DESC"
a["filter"]["order"] = schemas.SortOrderType.desc
a["filter"]["startDate"] = -1
a["filter"]["endDate"] = TimeUTC.now()
full_args, query_part= sessions.search_query_parts(
full_args, query_part = sessions.search_query_parts(
data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), error_status=None, errors_only=False,
issue=None, project_id=a["projectId"], user_id=None, favorite_only=False)
subQ = f"""SELECT COUNT(session_id) AS value

View file

@@ -1,6 +1,7 @@
import requests
from decouple import config
import schemas
from chalicelib.core import projects
SESSION_PROJECTION_COLS = """s.project_id,
@@ -19,14 +20,32 @@
"""
def get_live_sessions_ws(project_id, user_id=None):
def get_live_sessions_ws_user_id(project_id, user_id):
data = {
"filter": {"userId": user_id} if user_id else {}
}
return __get_live_sessions_ws(project_id=project_id, data=data)
def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSchema):
data = {
"filter": {},
"pagination": {"limit": body.limit, "page": body.page},
"sort": {"key": body.sort, "order": body.order}
}
for f in body.filters:
if f.type == schemas.LiveFilterType.metadata:
data["filter"][f.source] = f.value
else:
data["filter"][f.type.value] = f.value
return __get_live_sessions_ws(project_id=project_id, data=data)
def __get_live_sessions_ws(project_id, data):
project_key = projects.get_project_key(project_id)
params = {}
if user_id and len(user_id) > 0:
params["userId"] = user_id
try:
connected_peers = requests.get(config("assist") % config("S3_KEY") + f"/{project_key}", params,
timeout=config("assistTimeout", cast=int, default=5))
connected_peers = requests.post(config("assist") % config("S3_KEY") + f"/{project_key}", json=data,
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
@@ -44,27 +63,19 @@ def get_live_sessions_ws(project_id, user_id=None):
except:
print("couldn't get response")
live_peers = []
for s in live_peers:
_live_peers = live_peers
if "sessions" in live_peers:
_live_peers = live_peers["sessions"]
for s in _live_peers:
s["live"] = True
s["projectId"] = project_id
live_peers = sorted(live_peers, key=lambda l: l.get("timestamp", 0), reverse=True)
return live_peers
def get_live_session_by_id(project_id, session_id):
all_live = get_live_sessions_ws(project_id)
for l in all_live:
if str(l.get("sessionID")) == str(session_id):
return l
return None
def is_live(project_id, session_id, project_key=None):
if project_key is None:
project_key = projects.get_project_key(project_id)
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}",
connected_peers = requests.get(config("assist") % config("S3_KEY") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
@@ -83,7 +94,61 @@ def is_live(project_id, session_id, project_key=None):
except:
print("couldn't get response")
return False
return str(session_id) in connected_peers
return connected_peers
def is_live(project_id, session_id, project_key=None):
if project_key is None:
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
return False
connected_peers = connected_peers.json().get("data")
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
return False
except Exception as e:
print("issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(connected_peers.text)
except:
print("couldn't get response")
return False
return str(session_id) == connected_peers
def autocomplete(project_id, q: str, key: str = None):
project_key = projects.get_project_key(project_id)
params = {"q": q}
if key:
params["key"] = key
try:
results = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}/autocomplete",
params=params, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print("!! issue with the peer-server")
print(results.text)
return {"errors": [f"Something went wrong while calling assist: {results.text}"]}
results = results.json().get("data", [])
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
return {"errors": ["Assist request timeout"]}
except Exception as e:
print("issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(results.text)
except:
print("couldn't get response")
return {"errors": ["Something went wrong while calling assist"]}
return {"data": results}
def get_ice_servers():
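
For reference, the refactored get_live_sessions_ws above now POSTs a structured search body to the assist peer-server instead of passing a bare userId query parameter. An example of the shape it builds (all values fabricated):

body = {
    "filter": {
        "userId": "jane@example.com",  # plain filters are keyed by type
        "plan": "enterprise",          # metadata filters are keyed by source
    },
    "pagination": {"limit": 10, "page": 1},
    "sort": {"key": "timestamp", "order": "DESC"},
}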

View file

@@ -2,7 +2,7 @@ import json
from typing import Union
import schemas
from chalicelib.core import sessions
from chalicelib.core import sessions, funnels, errors
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@@ -42,7 +42,66 @@ def __try_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
return results
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
def __is_funnel_chart(data: schemas.TryCustomMetricsPayloadSchema):
return data.metric_type == schemas.MetricType.funnel
def __get_funnel_chart(project_id, data: schemas.TryCustomMetricsPayloadSchema):
if len(data.series) == 0:
return {
"stages": [],
"totalDropDueToIssues": 0
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
return funnels.get_top_insights_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
def __is_errors_list(data):
return data.metric_type == schemas.MetricType.table \
and data.metric_of == schemas.TableMetricOfType.errors
def __get_errors_list(project_id, user_id, data):
if len(data.series) == 0:
return {
"total": 0,
"errors": []
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data.series[0].filter.page = data.page
data.series[0].filter.limit = data.limit
return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)
def __is_sessions_list(data):
return data.metric_type == schemas.MetricType.table \
and data.metric_of == schemas.TableMetricOfType.sessions
def __get_sessions_list(project_id, user_id, data):
if len(data.series) == 0:
print("empty series")
return {
"total": 0,
"sessions": []
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data.series[0].filter.page = data.page
data.series[0].filter.limit = data.limit
return sessions.search2_pg(data=data.series[0].filter, project_id=project_id, user_id=user_id)
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema, user_id=None):
if __is_funnel_chart(data):
return __get_funnel_chart(project_id=project_id, data=data)
elif __is_errors_list(data):
return __get_errors_list(project_id=project_id, user_id=user_id, data=data)
elif __is_sessions_list(data):
return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)
series_charts = __try_live(project_id=project_id, data=data)
if data.view_type == schemas.MetricTimeseriesViewType.progress or data.metric_type == schemas.MetricType.table:
return series_charts
@@ -75,15 +134,22 @@ def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPa
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
series_charts = __try_live(project_id=project_id, data=metric)
if metric.view_type == schemas.MetricTimeseriesViewType.progress or metric.metric_type == schemas.MetricType.table:
return series_charts
results = [{}] * len(series_charts[0])
for i in range(len(results)):
for j, series_chart in enumerate(series_charts):
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
metric.series[j].name: series_chart[i]["count"]}
return results
return merged_live(project_id=project_id, data=metric, user_id=user_id)
# if __is_funnel_chart(metric):
# return __get_funnel_chart(project_id=project_id, data=metric)
# elif __is_errors_list(metric):
# return __get_errors_list(project_id=project_id, user_id=user_id, data=metric)
#
# series_charts = __try_live(project_id=project_id, data=metric)
# if metric.view_type == schemas.MetricTimeseriesViewType.progress or metric.metric_type == schemas.MetricType.table:
# return series_charts
# results = [{}] * len(series_charts[0])
# for i in range(len(results)):
# for j, series_chart in enumerate(series_charts):
# results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
# metric.series[j].name: series_chart[i]["count"]}
# return results
def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
@@ -105,6 +171,38 @@ def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessi
return results
def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
return {"seriesId": s.series_id, "seriesName": s.name,
**funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}
def get_errors_list(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
return {"seriesId": s.series_id, "seriesName": s.name,
**errors.search(data=s.filter, project_id=project_id, user_id=user_id)}
def try_sessions(project_id, user_id, data: schemas.CustomMetricSessionsPayloadSchema):
results = []
if data.series is None:
@@ -130,12 +228,16 @@ def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema, dashboa
_data[f"filter_{i}"] = s.filter.json()
series_len = len(data.series)
data.series = None
params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data}
params = {"user_id": user_id, "project_id": project_id,
"default_config": json.dumps(data.config.dict()),
**data.dict(), **_data}
query = cur.mogrify(f"""\
WITH m AS (INSERT INTO metrics (project_id, user_id, name, is_public,
view_type, metric_type, metric_of, metric_value, metric_format)
view_type, metric_type, metric_of, metric_value,
metric_format, default_config)
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s, %(metric_format)s)
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
%(metric_format)s, %(default_config)s)
RETURNING *)
INSERT
INTO metric_series(metric_id, index, name, filter)
@@ -396,3 +498,32 @@ def change_state(project_id, metric_id, user_id, status):
{"metric_id": metric_id, "status": status, "user_id": user_id})
)
return get(metric_id=metric_id, project_id=project_id, user_id=user_id)
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
data: schemas.CustomMetricSessionsPayloadSchema
# , range_value=None, start_date=None, end_date=None
):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
issues = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
issues = issues.get("significant", []) + issues.get("insignificant", [])
issue = None
for i in issues:
if i.get("issueId", "") == issue_id:
issue = i
break
return {"seriesId": s.series_id, "seriesName": s.name,
"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id,
issue=issue, data=s.filter)
if issue is not None else {"total": 0, "sessions": []},
"issue": issue}

View file

@@ -6,8 +6,9 @@ from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
# category name should be lower cased
CATEGORY_DESCRIPTION = {
'overview': 'High-level metrics and web vitals.',
'web vitals': 'A set of metrics that assess app performance on criteria such as load time, load performance, and stability.',
'custom': 'Previously created custom metrics by me and my team.',
'errors': 'Keep a closer eye on errors and track their type, origin and domain.',
'performance': 'Optimize your apps performance by tracking slow domains, page response times, memory consumption, CPU usage and more.',
@@ -33,17 +34,20 @@ def get_templates(project_id, user_id):
cur.execute(pg_query)
rows = cur.fetchall()
for r in rows:
r["description"] = CATEGORY_DESCRIPTION.get(r["category"], "")
r["description"] = CATEGORY_DESCRIPTION.get(r["category"].lower(), "")
for w in r["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
for s in w["series"]:
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
return helper.list_to_camel_case(rows)
def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):
with pg_client.PostgresClient() as cur:
pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned)
VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s)
pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned, description)
VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s, %(description)s)
RETURNING *"""
params = {"userId": user_id, "projectId": project_id, **data.dict()}
if data.metrics is not None and len(data.metrics) > 0:
@@ -134,7 +138,8 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
row = cur.fetchone()
offset = row["count"]
pg_query = f"""UPDATE dashboards
SET name = %(name)s
SET name = %(name)s,
description= %(description)s
{", is_public = %(is_public)s" if data.is_public is not None else ""}
{", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
WHERE dashboards.project_id = %(projectId)s

View file

@@ -425,10 +425,9 @@ def __get_sort_key(key):
def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
empty_response = {"data": {
'total': 0,
'errors': []
}}
empty_response = {'total': 0,
'errors': []
}
platform = None
for f in data.filters:
@@ -463,7 +462,7 @@
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = "DESC"
order = schemas.SortOrderType.desc
if data.order is not None:
order = data.order
extra_join = ""
@@ -544,7 +543,7 @@
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if flows:
return {"data": {"count": total}}
return {"count": total}
if total == 0:
rows = []
@@ -592,10 +591,8 @@
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
"data": {
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
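
The errors.search change above removes one level of nesting from the response, so callers now read total and errors at the top level. On a fabricated result:

old_response = {"data": {"total": 1, "errors": [{"errorId": "e1"}]}}
new_response = {"total": 1, "errors": [{"errorId": "e1"}]}
assert new_response == old_response["data"]  # callers drop the "data" wrapper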

View file

@@ -28,8 +28,8 @@ def __merge_cells(rows, start, count, replacement):
return rows
def __get_grouped_clickrage(rows, session_id):
click_rage_issues = issues.get_by_session_id(session_id=session_id, issue_type="click_rage")
def __get_grouped_clickrage(rows, session_id, project_id):
click_rage_issues = issues.get_by_session_id(session_id=session_id, issue_type="click_rage", project_id=project_id)
if len(click_rage_issues) == 0:
return rows
@@ -63,7 +63,7 @@ def get_by_sessionId2_pg(session_id, project_id, group_clickrage=False):
)
rows = cur.fetchall()
if group_clickrage:
rows = __get_grouped_clickrage(rows=rows, session_id=session_id)
rows = __get_grouped_clickrage(rows=rows, session_id=session_id, project_id=project_id)
cur.execute(cur.mogrify("""
SELECT
@@ -435,7 +435,15 @@ def __get_autocomplete_table(value, project_id):
query = cur.mogrify(" UNION ".join(sub_queries) + ";",
{"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)})
cur.execute(query)
try:
cur.execute(query)
except Exception as err:
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(query.decode('UTF-8'))
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
results = helper.list_to_camel_case(cur.fetchall())
return results
@ -464,14 +472,13 @@ def search(text, event_type, project_id, source, key):
return {"data": rows}
def get_errors_by_session_id(session_id):
def get_errors_by_session_id(session_id, project_id):
with pg_client.PostgresClient() as cur:
cur.execute(cur.mogrify(f"""\
SELECT er.*,ur.*, er.timestamp - s.start_ts AS time
FROM {event_type.ERROR.table} AS er INNER JOIN public.errors AS ur USING (error_id) INNER JOIN public.sessions AS s USING (session_id)
WHERE
er.session_id = %(session_id)s
ORDER BY timestamp;""", {"session_id": session_id}))
WHERE er.session_id = %(session_id)s AND s.project_id=%(project_id)s
ORDER BY timestamp;""", {"session_id": session_id, "project_id": project_id}))
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])


@ -251,6 +251,22 @@ def get_top_insights_on_the_fly(funnel_id, user_id, project_id, data: schemas.Fu
"totalDropDueToIssues": total_drop_due_to_issues}}
# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CustomMetricSeriesFilterSchema):
data.events = filter_stages(__parse_events(data.events))
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"stages": [], "totalDropDueToIssues": 0}
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data.dict(), project_id=project_id)
insights = helper.list_to_camel_case(insights)
if len(insights) > 0:
if total_drop_due_to_issues > insights[0]["sessionsCount"]:
total_drop_due_to_issues = insights[0]["sessionsCount"]
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
return {"stages": insights,
"totalDropDueToIssues": total_drop_due_to_issues}
def get_issues(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False)
if f is None:
@ -280,6 +296,19 @@ def get_issues_on_the_fly(funnel_id, user_id, project_id, data: schemas.FunnelSe
last_stage=len(data.events)))}
# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
def get_issues_on_the_fly_widget(project_id, data: schemas.CustomMetricSeriesFilterSchema):
data.events = filter_stages(data.events)
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"issues": []}
return {
"issues": helper.dict_to_camel_case(
significance.get_issues_list(filter_d=data.dict(), project_id=project_id, first_stage=1,
last_stage=len(data.events)))}
def get(funnel_id, project_id, user_id, flatten=True, fix_stages=True):
with pg_client.PostgresClient() as cur:
cur.execute(


@ -44,16 +44,18 @@ def get(project_id, issue_id):
return helper.dict_to_camel_case(data)
def get_by_session_id(session_id, issue_type=None):
def get_by_session_id(session_id, project_id, issue_type=None):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
SELECT *
FROM events_common.issues
INNER JOIN public.issues USING (issue_id)
WHERE session_id = %(session_id)s {"AND type = %(type)s" if issue_type is not None else ""}
WHERE session_id = %(session_id)s
AND project_id= %(project_id)s
{"AND type = %(type)s" if issue_type is not None else ""}
ORDER BY timestamp;""",
{"session_id": session_id, "type": issue_type})
{"session_id": session_id, "project_id": project_id, "type": issue_type})
)
return helper.list_to_camel_case(cur.fetchall())


@ -1,21 +1,9 @@
from chalicelib.utils import pg_client
EDITION = 'foss'
def get_status(tenant_id=None):
with pg_client.PostgresClient() as cur:
cur.execute("SELECT * FROM public.tenants;")
r = cur.fetchone()
return {
"hasActivePlan": True,
"current": {
"edition": r.get("edition", "").upper(),
"versionNumber": r.get("version_number", ""),
"license": "",
"expirationDate": -1
},
"count": {
"teamMember": r.get("t_users"),
"projects": r.get("t_projects"),
"capturedSessions": r.get("t_sessions")
}
"edition": EDITION,
"expirationDate": -1
}


@ -1,4 +1,5 @@
from elasticsearch import Elasticsearch, RequestsHttpConnection
# from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import Elasticsearch
from chalicelib.core import log_tools
import base64
import logging
@ -58,20 +59,21 @@ def add_edit(tenant_id, project_id, data):
def __get_es_client(host, port, api_key_id, api_key, use_ssl=False, timeout=15):
scheme = "http" if host.startswith("http") else "https"
host = host.replace("http://", "").replace("https://", "")
try:
args = {
"hosts": [{"host": host, "port": port}],
"use_ssl": use_ssl,
"hosts": [{"host": host, "port": port, "scheme": scheme}],
"verify_certs": False,
"ca_certs": False,
"connection_class": RequestsHttpConnection,
"timeout": timeout
# "ca_certs": False,
# "connection_class": RequestsHttpConnection,
"request_timeout": timeout,
"api_key": (api_key_id, api_key)
}
if api_key_id is not None and len(api_key_id) > 0:
# args["http_auth"] = (username, password)
token = "ApiKey " + base64.b64encode(f"{api_key_id}:{api_key}".encode("utf-8")).decode("utf-8")
args["headers"] = {"Authorization": token}
# if api_key_id is not None and len(api_key_id) > 0:
# # args["http_auth"] = (username, password)
# token = "ApiKey " + base64.b64encode(f"{api_key_id}:{api_key}".encode("utf-8")).decode("utf-8")
# args["headers"] = {"Authorization": token}
es = Elasticsearch(
**args
)
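In short, this hunk tracks the elasticsearch-py 8.x client signature: the scheme moves into each host entry, `timeout` becomes `request_timeout`, and the hand-built `ApiKey` header gives way to the `api_key` tuple. A minimal sketch of the new construction, with placeholder host and key values:

```python
from elasticsearch import Elasticsearch

# Placeholder values; verify_certs=False mirrors the hunk above.
es = Elasticsearch(
    hosts=[{"host": "es.example.com", "port": 9200, "scheme": "https"}],
    verify_certs=False,
    request_timeout=15,
    api_key=("key-id", "key-secret"),  # replaces the manual Authorization header
)
```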


@ -967,7 +967,7 @@ def get_pages_dom_build_time(project_id, startTimestamp=TimeUTC.now(delta_days=-
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -1069,11 +1069,11 @@ def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-
pg_sub_query.append("pages.speed_index>0")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS avg
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS value
FROM events.pages INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY sessions.user_country
ORDER BY avg,sessions.user_country;"""
ORDER BY value, sessions.user_country;"""
params = {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
@ -1087,7 +1087,7 @@ def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-
avg = cur.fetchone()["avg"]
else:
avg = 0
return {"avg": avg, "chart": helper.list_to_camel_case(rows)}
return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.millisecond}
def get_pages_response_time(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -1126,7 +1126,9 @@ def get_pages_response_time(project_id, startTimestamp=TimeUTC.now(delta_days=-1
WHERE {" AND ".join(pg_sub_query)};"""
cur.execute(cur.mogrify(pg_query, params))
avg = cur.fetchone()["avg"]
return {"value": avg, "chart": rows, "unit": schemas.TemplatePredefinedUnits.millisecond}
result = {"value": avg, "chart": rows}
helper.__time_value(result)
return result
def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -1169,7 +1171,7 @@ def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(
else:
quantiles = [0 for i in range(len(quantiles_keys))]
result = {
"avg": avg,
"value": avg,
"total": sum(r["count"] for r in rows),
"chart": [],
"percentiles": [{
@ -1177,7 +1179,8 @@ def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(
"responseTime": int(quantiles[i])
} for i, v in enumerate(quantiles_keys)
],
"extremeValues": [{"count": 0}]
"extremeValues": [{"count": 0}],
"unit": schemas.TemplatePredefinedUnits.millisecond
}
rows = helper.list_to_camel_case(rows)
_99 = result["percentiles"][-1]["responseTime"]
@ -1348,7 +1351,7 @@ def get_time_to_render(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
"endTimestamp": endTimestamp, "value": url, **__get_constraint_values(args)}
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -1498,7 +1501,7 @@ def get_crashes(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
pg_sub_query_chart.append("m_issues.type = 'crash'")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT generated_timestamp AS timestamp,
COUNT(sessions) AS count
COUNT(sessions) AS value
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (
SELECT sessions.session_id
@ -1556,7 +1559,7 @@ def get_crashes(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
versions.append({v["version"]: v["count"] / (r["total"] / 100)})
r["versions"] = versions
return {"chart": rows, "browsers": browsers}
return {"chart": rows, "browsers": browsers, "unit": schemas.TemplatePredefinedUnits.count}
def __get_neutral(rows, add_All_if_empty=True):
@ -1719,7 +1722,7 @@ def get_slowest_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT
resources.url_host AS domain,
AVG(resources.duration) AS avg
AVG(resources.duration) AS value
FROM events.resources INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.url_host
@ -1738,7 +1741,7 @@ def get_slowest_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
avg = cur.fetchone()["avg"]
else:
avg = 0
return {"avg": avg, "partition": rows}
return {"value": avg, "chart": rows, "unit": schemas.TemplatePredefinedUnits.millisecond}
def get_errors_per_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -2241,7 +2244,7 @@ def get_application_activity_avg_image_load_time(project_id, startTimestamp=Time
row = __get_application_activity_avg_image_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2300,7 +2303,7 @@ def __get_application_activity_avg_page_load_time(cur, project_id, startTimestam
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -2316,7 +2319,7 @@ def get_application_activity_avg_page_load_time(project_id, startTimestamp=TimeU
row = __get_application_activity_avg_page_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2369,7 +2372,7 @@ def __get_application_activity_avg_request_load_time(cur, project_id, startTimes
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -2385,7 +2388,7 @@ def get_application_activity_avg_request_load_time(project_id, startTimestamp=Ti
row = __get_application_activity_avg_request_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2442,7 +2445,7 @@ def get_page_metrics_avg_dom_content_load_start(project_id, startTimestamp=TimeU
row = __get_page_metrics_avg_dom_content_load_start(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2512,7 +2515,7 @@ def get_page_metrics_avg_first_contentful_pixel(project_id, startTimestamp=TimeU
if len(rows) > 0:
previous = helper.dict_to_camel_case(rows[0])
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2645,7 +2648,7 @@ def get_user_activity_avg_session_duration(project_id, startTimestamp=TimeUTC.no
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2731,7 +2734,7 @@ def get_top_metrics_avg_response_time(project_id, startTimestamp=TimeUTC.now(del
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2772,7 +2775,7 @@ def get_top_metrics_avg_first_paint(project_id, startTimestamp=TimeUTC.now(delta
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2816,7 +2819,7 @@ def get_top_metrics_avg_dom_content_loaded(project_id, startTimestamp=TimeUTC.no
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2857,7 +2860,7 @@ def get_top_metrics_avg_till_first_bit(project_id, startTimestamp=TimeUTC.now(de
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2899,7 +2902,7 @@ def get_top_metrics_avg_time_to_interactive(project_id, startTimestamp=TimeUTC.n
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)


@ -25,6 +25,22 @@ def get_all(tenant_id, user_id):
return rows
def get_all_count(tenant_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
SELECT COUNT(notifications.*) AS count
FROM public.notifications
LEFT JOIN (SELECT notification_id
FROM public.user_viewed_notifications
WHERE user_viewed_notifications.user_id = %(user_id)s) AS user_viewed_notifications USING (notification_id)
WHERE (notifications.user_id IS NULL OR notifications.user_id =%(user_id)s) AND user_viewed_notifications.notification_id IS NULL;""",
{"user_id": user_id})
)
row = cur.fetchone()
return row
def view_notification(user_id, notification_ids=[], tenant_id=None, startTimestamp=None, endTimestamp=None):
if (notification_ids is None or len(notification_ids) == 0) and endTimestamp is None:
return False


@ -1,8 +1,10 @@
from chalicelib.utils import helper, pg_client
from decouple import config
def get_by_session_id(session_id, project_id):
def get_by_session_id(session_id, project_id, start_ts, duration):
with pg_client.PostgresClient() as cur:
delta = config("events_ts_delta", cast=int, default=5 * 60) * 1000
ch_query = """\
SELECT
timestamp AS datetime,
@ -16,8 +18,13 @@ def get_by_session_id(session_id, project_id):
success,
COALESCE(status, CASE WHEN success THEN 200 END) AS status
FROM events.resources INNER JOIN sessions USING (session_id)
WHERE session_id = %(session_id)s AND project_id= %(project_id)s;"""
params = {"session_id": session_id, "project_id": project_id}
WHERE session_id = %(session_id)s
AND project_id= %(project_id)s
AND sessions.start_ts=%(start_ts)s
AND resources.timestamp>=%(res_start_ts)s
AND resources.timestamp<=%(res_end_ts)s;"""
params = {"session_id": session_id, "project_id": project_id, "start_ts": start_ts, "duration": duration,
"res_start_ts": start_ts - delta, "res_end_ts": start_ts + duration + delta, }
cur.execute(cur.mogrify(ch_query, params))
rows = cur.fetchall()
return helper.list_to_camel_case(rows)
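The widened WHERE clause above keeps only resources inside the session's lifetime plus a configurable margin (`events_ts_delta`, defaulting to 5 minutes). A small sketch of the window arithmetic with illustrative values:

```python
# Illustrative numbers; mirrors the params built above (all in epoch ms).
start_ts = 1_655_000_000_000   # session start
duration = 90_000              # session length
delta = 5 * 60 * 1000          # events_ts_delta default

res_start_ts = start_ts - delta           # earliest resource kept
res_end_ts = start_ts + duration + delta  # latest resource kept
assert res_end_ts - res_start_ts == duration + 2 * delta
```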


@ -85,7 +85,7 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
else:
data['events'] = events.get_by_sessionId2_pg(project_id=project_id, session_id=session_id,
group_clickrage=True)
all_errors = events.get_errors_by_session_id(session_id=session_id)
all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
# to keep only the first stack
data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors if
@ -94,10 +94,12 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
data['userEvents'] = events.get_customs_by_sessionId2_pg(project_id=project_id,
session_id=session_id)
data['mobsUrl'] = sessions_mobs.get_web(sessionId=session_id)
data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id)
data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
start_ts=data["startTs"],
duration=data["duration"])
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['issues'] = issues.get_by_session_id(session_id=session_id)
data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
data['live'] = live and assist.is_live(project_id=project_id,
session_id=session_id,
project_key=data["projectKey"])
@ -201,12 +203,12 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
elif data.group_by_user:
g_sort = "count(full_sessions)"
if data.order is None:
data.order = "DESC"
data.order = schemas.SortOrderType.desc
else:
data.order = data.order.upper()
if data.sort is not None and data.sort != 'sessionsCount':
sort = helper.key_to_snake_case(data.sort)
g_sort = f"{'MIN' if data.order == 'DESC' else 'MAX'}({sort})"
g_sort = f"{'MIN' if data.order == schemas.SortOrderType.desc else 'MAX'}({sort})"
else:
sort = 'start_ts'
@ -230,7 +232,7 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
full_args)
else:
if data.order is None:
data.order = "DESC"
data.order = schemas.SortOrderType.desc
sort = 'session_id'
if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
@ -254,9 +256,9 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
cur.execute(main_query)
except Exception as err:
print("--------- SESSIONS SEARCH QUERY EXCEPTION -----------")
print(main_query)
print(main_query.decode('UTF-8'))
print("--------- PAYLOAD -----------")
print(data.dict())
print(data.json())
print("--------------------")
raise err
if errors_only:
@ -1199,7 +1201,7 @@ def get_session_ids_by_user_ids(project_id, user_ids):
def delete_sessions_by_session_ids(session_ids):
with pg_client.PostgresClient(long_query=True) as cur:
with pg_client.PostgresClient(unlimited_query=True) as cur:
query = cur.mogrify(
"""\
DELETE FROM public.sessions
@ -1213,7 +1215,7 @@ def delete_sessions_by_session_ids(session_ids):
def delete_sessions_by_user_ids(project_id, user_ids):
with pg_client.PostgresClient(long_query=True) as cur:
with pg_client.PostgresClient(unlimited_query=True) as cur:
query = cur.mogrify(
"""\
DELETE FROM public.sessions
@ -1227,6 +1229,6 @@ def delete_sessions_by_user_ids(project_id, user_ids):
def count_all():
with pg_client.PostgresClient(long_query=True) as cur:
with pg_client.PostgresClient(unlimited_query=True) as cur:
cur.execute("SELECT COUNT(session_id) AS count FROM public.sessions;")
row = cur.fetchone()
return row.get("count", 0)


@ -5,14 +5,23 @@ from chalicelib.utils.s3 import client
def get_web(sessionId):
return client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("sessions_bucket"),
'Key': str(sessionId)
},
ExpiresIn=100000
)
return [
client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("sessions_bucket"),
'Key': str(sessionId)
},
ExpiresIn=100000
),
client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("sessions_bucket"),
'Key': str(sessionId) + "e"
},
ExpiresIn=100000
)]
def get_ios(sessionId):


@ -24,7 +24,6 @@ T_VALUES = {1: 12.706, 2: 4.303, 3: 3.182, 4: 2.776, 5: 2.571, 6: 2.447, 7: 2.36
21: 2.080, 22: 2.074, 23: 2.069, 25: 2.064, 26: 2.060, 27: 2.056, 28: 2.052, 29: 2.045, 30: 2.042}
def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
"""
Add minimal timestamp
@ -293,7 +292,6 @@ def pearson_corr(x: list, y: list):
return r, confidence, False
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_with_context, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
@ -363,7 +361,6 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
return transitions, errors, all_errors, n_sess_affected
def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
@ -415,7 +412,6 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
return all_issues_with_context, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict, contexts
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
for ind, row in enumerate(rows):
@ -467,7 +463,6 @@ def get_stages(stages, rows):
return stages_list
def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False):
"""
@ -544,7 +539,6 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
return n_critical_issues, issues_dict, total_drop_due_to_issues
def get_top_insights(filter_d, project_id):
output = []
stages = filter_d.get("events", [])
@ -582,9 +576,8 @@ def get_top_insights(filter_d, project_id):
return stages_list, total_drop_due_to_issues
def get_issues_list(filter_d, project_id, first_stage=None, last_stage=None):
output = dict({'critical_issues_count': 0})
output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
stages = filter_d.get("events", [])
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)


@ -67,8 +67,8 @@ def create_step1(data: schemas.UserSignupSchema):
}
query = f"""\
WITH t AS (
INSERT INTO public.tenants (name, version_number, edition)
VALUES (%(organizationName)s, (SELECT openreplay_version()), 'fos')
INSERT INTO public.tenants (name, version_number)
VALUES (%(organizationName)s, (SELECT openreplay_version()))
RETURNING api_key
),
u AS (
@ -77,8 +77,8 @@ def create_step1(data: schemas.UserSignupSchema):
RETURNING user_id,email,role,name
),
au AS (INSERT
INTO public.basic_authentication (user_id, password, generated_password)
VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)), FALSE)
INTO public.basic_authentication (user_id, password)
VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)))
)
INSERT INTO public.projects (name, active)
VALUES (%(projectName)s, TRUE)


@ -1,13 +1,15 @@
from chalicelib.utils import pg_client
import requests
from chalicelib.core import license
def process_data(data, edition='fos'):
def process_data(data):
return {
'edition': edition,
'edition': license.EDITION,
'tracking': data["opt_out"],
'version': data["version_number"],
'user_id': data["user_id"],
'user_id': data["tenant_key"],
'tenant_key': data["tenant_key"],
'owner_email': None if data["opt_out"] else data["email"],
'organization_name': None if data["opt_out"] else data["name"],
'users_count': data["t_users"],
@ -27,7 +29,7 @@ def compute():
t_projects=COALESCE((SELECT COUNT(*) FROM public.projects WHERE deleted_at ISNULL), 0),
t_sessions=COALESCE((SELECT COUNT(*) FROM public.sessions), 0),
t_users=COALESCE((SELECT COUNT(*) FROM public.users WHERE deleted_at ISNULL), 0)
RETURNING name,t_integrations,t_projects,t_sessions,t_users,user_id,opt_out,
RETURNING name,t_integrations,t_projects,t_sessions,t_users,tenant_key,opt_out,
(SELECT openreplay_version()) AS version_number,(SELECT email FROM public.users WHERE role = 'owner' LIMIT 1);"""
)
data = cur.fetchone()
@ -39,6 +41,7 @@ def new_client():
cur.execute(
f"""SELECT *,
(SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
FROM public.tenants;""")
FROM public.tenants
LIMIT 1;""")
data = cur.fetchone()
requests.post('https://api.openreplay.com/os/signup', json=process_data(data))


@ -1,7 +1,7 @@
import schemas
from chalicelib.utils import pg_client
from chalicelib.utils import helper
from chalicelib.core import users
from chalicelib.core import users, license
def get_by_tenant_id(tenant_id):
@ -13,7 +13,7 @@ def get_by_tenant_id(tenant_id):
name,
api_key,
created_at,
edition,
'{license.EDITION}' AS edition,
version_number,
opt_out
FROM public.tenants
@ -67,7 +67,7 @@ def update(tenant_id, user_id, data: schemas.UpdateTenantSchema):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"error": "unauthorized"}
return {"errors": ["unauthorized, needs admin or owner"]}
if data.name is None and data.opt_out is None:
return {"errors": ["please provide 'name' of 'optOut' attribute for update"]}
changes = {}


@ -4,6 +4,7 @@ import secrets
from decouple import config
from fastapi import BackgroundTasks
import schemas
from chalicelib.core import authorizers, metadata, projects
from chalicelib.core import tenants, assist
from chalicelib.utils import dev, email_helper
@ -21,10 +22,10 @@ def create_new_member(email, invitation_token, admin, name, owner=False):
query = cur.mogrify(f"""\
WITH u AS (INSERT INTO public.users (email, role, name, data)
VALUES (%(email)s, %(role)s, %(name)s, %(data)s)
RETURNING user_id,email,role,name,appearance
RETURNING user_id,email,role,name
),
au AS (INSERT INTO public.basic_authentication (user_id, generated_password, invitation_token, invited_at)
VALUES ((SELECT user_id FROM u), TRUE, %(invitation_token)s, timezone('utc'::text, now()))
au AS (INSERT INTO public.basic_authentication (user_id, invitation_token, invited_at)
VALUES ((SELECT user_id FROM u), %(invitation_token)s, timezone('utc'::text, now()))
RETURNING invitation_token
)
SELECT u.user_id,
@ -32,7 +33,6 @@ def create_new_member(email, invitation_token, admin, name, owner=False):
u.email,
u.role,
u.name,
TRUE AS change_password,
(CASE WHEN u.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member,
@ -61,7 +61,6 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
email,
role,
name,
TRUE AS change_password,
(CASE WHEN role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
@ -73,8 +72,7 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
result = cur.fetchone()
query = cur.mogrify("""\
UPDATE public.basic_authentication
SET generated_password = TRUE,
invitation_token = %(invitation_token)s,
SET invitation_token = %(invitation_token)s,
invited_at = timezone('utc'::text, now()),
change_pwd_expire_at = NULL,
change_pwd_token = NULL
@ -132,11 +130,7 @@ def update(tenant_id, user_id, changes):
else:
sub_query_bauth.append(f"{helper.key_to_snake_case(key)} = %({key})s")
else:
if key == "appearance":
sub_query_users.append(f"appearance = %(appearance)s::jsonb")
changes["appearance"] = json.dumps(changes[key])
else:
sub_query_users.append(f"{helper.key_to_snake_case(key)} = %({key})s")
sub_query_users.append(f"{helper.key_to_snake_case(key)} = %({key})s")
with pg_client.PostgresClient() as cur:
if len(sub_query_users) > 0:
@ -151,11 +145,9 @@ def update(tenant_id, user_id, changes):
users.email,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance;""",
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
{"user_id": user_id, **changes})
)
if len(sub_query_bauth) > 0:
@ -170,11 +162,9 @@ def update(tenant_id, user_id, changes):
users.email,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance;""",
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
{"user_id": user_id, **changes})
)
@ -244,16 +234,15 @@ def get(user_id, tenant_id):
cur.execute(
cur.mogrify(
f"""SELECT
users.user_id AS id,
users.user_id,
email,
role,
name,
basic_authentication.generated_password,
name,
(CASE WHEN role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN role = 'member' THEN TRUE ELSE FALSE END) AS member,
appearance,
api_key
api_key,
TRUE AS has_password
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
WHERE
users.user_id = %(userId)s
@ -262,7 +251,7 @@ def get(user_id, tenant_id):
{"userId": user_id})
)
r = cur.fetchone()
return helper.dict_to_camel_case(r, ignore_keys=["appearance"])
return helper.dict_to_camel_case(r)
def generate_new_api_key(user_id):
@ -281,45 +270,39 @@ def generate_new_api_key(user_id):
return helper.dict_to_camel_case(r)
def edit(user_id_to_update, tenant_id, changes, editor_id):
ALLOW_EDIT = ["name", "email", "admin", "appearance"]
def edit(user_id_to_update, tenant_id, changes: schemas.EditUserSchema, editor_id):
user = get(user_id=user_id_to_update, tenant_id=tenant_id)
if editor_id != user_id_to_update or "admin" in changes and changes["admin"] != user["admin"]:
if editor_id != user_id_to_update or changes.admin is not None and changes.admin != user["admin"]:
admin = get(tenant_id=tenant_id, user_id=editor_id)
if not admin["superAdmin"] and not admin["admin"]:
return {"errors": ["unauthorized"]}
_changes = {}
if editor_id == user_id_to_update:
if user["superAdmin"]:
changes.pop("admin")
elif user["admin"] != changes["admin"]:
return {"errors": ["cannot change your own role"]}
if changes.admin is not None:
if user["superAdmin"]:
changes.admin = None
elif changes.admin != user["admin"]:
return {"errors": ["cannot change your own role"]}
keys = list(changes.keys())
for k in keys:
if k not in ALLOW_EDIT or changes[k] is None:
changes.pop(k)
keys = list(changes.keys())
if changes.email is not None and changes.email != user["email"]:
if email_exists(changes.email):
return {"errors": ["email already exists."]}
if get_deleted_user_by_email(changes.email) is not None:
return {"errors": ["email previously deleted."]}
_changes["email"] = changes.email
if len(keys) > 0:
if "email" in keys and changes["email"] != user["email"]:
if email_exists(changes["email"]):
return {"errors": ["email already exists."]}
if get_deleted_user_by_email(changes["email"]) is not None:
return {"errors": ["email previously deleted."]}
if "admin" in keys:
changes["role"] = "admin" if changes.pop("admin") else "member"
if len(changes.keys()) > 0:
updated_user = update(tenant_id=tenant_id, user_id=user_id_to_update, changes=changes)
if changes.name is not None and len(changes.name) > 0:
_changes["name"] = changes.name
return {"data": updated_user}
if changes.admin is not None:
_changes["role"] = "admin" if changes.admin else "member"
if len(_changes.keys()) > 0:
updated_user = update(tenant_id=tenant_id, user_id=user_id_to_update, changes=_changes)
return {"data": updated_user}
return {"data": user}
def edit_appearance(user_id, tenant_id, changes):
updated_user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
return {"data": updated_user}
def get_by_email_only(email):
with pg_client.PostgresClient() as cur:
cur.execute(
@ -329,8 +312,7 @@ def get_by_email_only(email):
1 AS tenant_id,
users.email,
users.role,
users.name,
basic_authentication.generated_password,
users.name,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
@ -353,8 +335,7 @@ def get_by_email_reset(email, reset_token):
1 AS tenant_id,
users.email,
users.role,
users.name,
basic_authentication.generated_password,
users.name,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
@ -377,7 +358,7 @@ def get_members(tenant_id):
users.email,
users.role,
users.name,
basic_authentication.generated_password,
users.created_at,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
@ -393,6 +374,7 @@ def get_members(tenant_id):
if len(r):
r = helper.list_to_camel_case(r)
for u in r:
u["createdAt"] = TimeUTC.datetime_to_timestamp(u["createdAt"])
if u["invitationToken"]:
u["invitationLink"] = __get_invitation_link(u.pop("invitationToken"))
else:
@ -562,28 +544,26 @@ def auth_exists(user_id, tenant_id, jwt_iat, jwt_aud):
{"userId": user_id})
)
r = cur.fetchone()
return r is not None \
and r.get("jwt_iat") is not None \
and (abs(jwt_iat - TimeUTC.datetime_to_timestamp(r["jwt_iat"]) // 1000) <= 1 \
or (jwt_aud.startswith("plugin") \
and (r["changed_at"] is None \
or jwt_iat >= (TimeUTC.datetime_to_timestamp(r["changed_at"]) // 1000)))
)
return r is not None \
and r.get("jwt_iat") is not None \
and (abs(jwt_iat - TimeUTC.datetime_to_timestamp(r["jwt_iat"]) // 1000) <= 1 \
or (jwt_aud.startswith("plugin") \
and (r["changed_at"] is None \
or jwt_iat >= (TimeUTC.datetime_to_timestamp(r["changed_at"]) // 1000)))
)
def authenticate(email, password, for_change_password=False, for_plugin=False):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT
users.user_id AS id,
users.user_id,
1 AS tenant_id,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
FROM public.users INNER JOIN public.basic_authentication USING(user_id)
WHERE users.email = %(email)s
AND basic_authentication.password = crypt(%(password)s, basic_authentication.password)
@ -597,16 +577,16 @@ def authenticate(email, password, for_change_password=False, for_plugin=False):
if r is not None:
if for_change_password:
return True
r = helper.dict_to_camel_case(r, ignore_keys=["appearance"])
r = helper.dict_to_camel_case(r)
query = cur.mogrify(
f"""UPDATE public.users
SET jwt_iat = timezone('utc'::text, now())
WHERE user_id = %(user_id)s
RETURNING jwt_iat;""",
{"user_id": r["id"]})
{"user_id": r["userId"]})
cur.execute(query)
return {
"jwt": authorizers.generate_jwt(r['id'], r['tenantId'],
"jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
TimeUTC.datetime_to_timestamp(cur.fetchone()["jwt_iat"]),
aud=f"plugin:{helper.get_stage_name()}" if for_plugin else f"front:{helper.get_stage_name()}"),
"email": email,


@ -29,8 +29,12 @@ def edit_config(user_id, weekly_report):
def cron():
if not helper.has_smtp():
print("!!! No SMTP configuration found, ignoring weekly report")
return
with pg_client.PostgresClient(long_query=True) as cur:
params = {"3_days_ago": TimeUTC.midnight(delta_days=-3),
params = {"tomorrow": TimeUTC.midnight(delta_days=1),
"3_days_ago": TimeUTC.midnight(delta_days=-3),
"1_week_ago": TimeUTC.midnight(delta_days=-7),
"2_week_ago": TimeUTC.midnight(delta_days=-14),
"5_week_ago": TimeUTC.midnight(delta_days=-35)}
@ -43,18 +47,18 @@ def cron():
COALESCE(week_0_issues.count, 0) AS this_week_issues_count,
COALESCE(week_1_issues.count, 0) AS past_week_issues_count,
COALESCE(month_1_issues.count, 0) AS past_month_issues_count
FROM public.projects
FROM (SELECT project_id, name FROM public.projects WHERE projects.deleted_at ISNULL) AS projects
INNER JOIN LATERAL (
SELECT sessions.project_id
FROM public.sessions
WHERE sessions.project_id = projects.project_id
AND start_ts >= %(3_days_ago)s
AND start_ts < %(tomorrow)s
LIMIT 1) AS recently_active USING (project_id)
INNER JOIN LATERAL (
SELECT COALESCE(ARRAY_AGG(email), '{}') AS emails
FROM public.users
WHERE users.tenant_id = projects.tenant_id
AND users.deleted_at ISNULL
WHERE users.deleted_at ISNULL
AND users.weekly_report
) AS users ON (TRUE)
LEFT JOIN LATERAL (
@ -62,25 +66,25 @@ def cron():
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= %(1_week_ago)s
AND issues.timestamp < %(tomorrow)s
) AS week_0_issues ON (TRUE)
LEFT JOIN LATERAL (
SELECT COUNT(1) AS count
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp <= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '2 week') * 1000)::BIGINT
AND issues.timestamp <= %(1_week_ago)s
AND issues.timestamp >= %(2_week_ago)s
) AS week_1_issues ON (TRUE)
LEFT JOIN LATERAL (
SELECT COUNT(1) AS count
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp <= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '5 week') * 1000)::BIGINT
) AS month_1_issues ON (TRUE)
WHERE projects.deleted_at ISNULL;"""), params)
AND issues.timestamp <= %(1_week_ago)s
AND issues.timestamp >= %(5_week_ago)s
) AS month_1_issues ON (TRUE);"""), params)
projects_data = cur.fetchall()
emails_to_send = []
for p in projects_data:


@ -1,12 +1,13 @@
import math
import random
import re
import string
from typing import Union
import math
import requests
import schemas
from chalicelib.utils.TimeUTC import TimeUTC
local_prefix = 'local-'
from decouple import config
@ -364,10 +365,6 @@ def has_smtp():
return config("EMAIL_HOST") is not None and len(config("EMAIL_HOST")) > 0
def get_edition():
return "ee" if "ee" in config("ENTERPRISE_BUILD", default="").lower() else "foss"
def old_search_payload_to_flat(values):
# in case the old search body was passed
if values.get("events") is not None:
@ -384,3 +381,20 @@ def custom_alert_to_front(values):
if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom:
values["query"]["left"] = values["seriesId"]
return values
def __time_value(row):
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
factor = 1
if row["value"] > TimeUTC.MS_MINUTE:
row["value"] = row["value"] / TimeUTC.MS_MINUTE
row["unit"] = schemas.TemplatePredefinedUnits.minute
factor = TimeUTC.MS_MINUTE
elif row["value"] > 1 * 1000:
row["value"] = row["value"] / 1000
row["unit"] = schemas.TemplatePredefinedUnits.second
factor = 1000
if "chart" in row and factor > 1:
for r in row["chart"]:
r["value"] /= factor


@ -52,7 +52,9 @@ def make_pool():
except (Exception, psycopg2.DatabaseError) as error:
print("Error while closing all connexions to PostgreSQL", error)
try:
postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20), 100, **PG_CONFIG)
postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20),
config("pg_maxconn", cast=int, default=80),
**PG_CONFIG)
if (postgreSQL_pool):
print("Connection pool created successfully")
except (Exception, psycopg2.DatabaseError) as error:
@ -74,12 +76,17 @@ class PostgresClient:
cursor = None
long_query = False
def __init__(self, long_query=False):
def __init__(self, long_query=False, unlimited_query=False):
self.long_query = long_query
if long_query:
if unlimited_query:
long_config = dict(_PG_CONFIG)
long_config["application_name"] += "-UNLIMITED"
self.connection = psycopg2.connect(**long_config)
elif long_query:
long_config = dict(_PG_CONFIG)
long_config["application_name"] += "-LONG"
self.connection = psycopg2.connect(**_PG_CONFIG)
long_config["options"] = f"-c statement_timeout={config('pg_long_timeout', cast=int, default=5*60) * 1000}"
self.connection = psycopg2.connect(**long_config)
else:
self.connection = postgreSQL_pool.getconn()
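After this change there are three connection modes; a usage sketch, assuming the module is importable as in the rest of the codebase:

```python
from chalicelib.utils import pg_client

# Default: a pooled connection with the standard settings.
with pg_client.PostgresClient() as cur:
    cur.execute("SELECT 1;")

# Long query: dedicated connection, statement_timeout from pg_long_timeout
# (default 5 minutes).
with pg_client.PostgresClient(long_query=True) as cur:
    cur.execute("SELECT COUNT(*) FROM public.sessions;")

# Unlimited query: dedicated connection with no statement timeout,
# used above for bulk session deletion.
with pg_client.PostgresClient(unlimited_query=True) as cur:
    cur.execute("DELETE FROM public.sessions WHERE project_id = %(pid)s;", {"pid": 1})
```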


@ -5,11 +5,14 @@ import boto3
import botocore
from botocore.client import Config
client = boto3.client('s3', endpoint_url=config("S3_HOST"),
aws_access_key_id=config("S3_KEY"),
aws_secret_access_key=config("S3_SECRET"),
config=Config(signature_version='s3v4'),
region_name=config("sessions_region"))
if not config("S3_HOST", default=False):
client = boto3.client('s3')
else:
client = boto3.client('s3', endpoint_url=config("S3_HOST"),
aws_access_key_id=config("S3_KEY"),
aws_secret_access_key=config("S3_SECRET"),
config=Config(signature_version='s3v4'),
region_name=config("sessions_region"))
def exists(bucket, key):

api/development.md (new file, 30 lines)

@ -0,0 +1,30 @@
### Prerequisites
- [Vagrant](../scripts/vagrant/README.md)
- Python 3.9
- Pipenv
### Development environment
```bash
cd openreplay/api
# Make your own copy of .env file and edit it as you want
cp .env.dev .env
# Create a .venv folder to contain all your dependencies
mkdir .venv
# Installing dependencies (pipenv will detect the .venv folder and use it as a target)
pipenv install -r requirements.txt [--skip-lock]
```
### Building and deploying locally
```bash
cd openreplay-contributions
vagrant ssh
cd openreplay-dev/openreplay/scripts/helmcharts
# For complete list of options
# bash local_deploy.sh help
bash local_deploy.sh api
```


@ -33,7 +33,9 @@ class ORRoute(APIRoute):
if isinstance(response, JSONResponse):
response: JSONResponse = response
body = json.loads(response.body.decode('utf8'))
if response.status_code == 200 and body is not None and body.get("errors") is not None:
if response.status_code == 200 \
and body is not None and isinstance(body, dict) \
and body.get("errors") is not None:
if "not found" in body["errors"][0]:
response.status_code = status.HTTP_404_NOT_FOUND
else:


@ -1,15 +1,15 @@
requests==2.26.0
urllib3==1.26.6
boto3==1.16.1
pyjwt==1.7.1
psycopg2-binary==2.8.6
elasticsearch==7.9.1
jira==3.1.1
requests==2.28.0
urllib3==1.26.9
boto3==1.24.11
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.2.3
jira==3.2.0
fastapi==0.75.0
uvicorn[standard]==0.17.5
fastapi==0.78.0
uvicorn[standard]==0.17.6
python-decouple==3.6
pydantic[email]==1.8.2
apscheduler==3.8.1
pydantic[email]==1.9.1
apscheduler==3.9.1


@ -1,7 +1,8 @@
from typing import Union
from typing import Union, Optional
from decouple import config
from fastapi import Depends, Body, BackgroundTasks
from fastapi import Depends, Body, BackgroundTasks, HTTPException
from starlette import status
import schemas
from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \
@ -13,7 +14,7 @@ from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assig
assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \
custom_metrics, saved_search
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import email_helper
from chalicelib.utils import email_helper, helper, captcha
from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context
from routers.base import get_routers
@ -21,6 +22,34 @@ from routers.base import get_routers
public_app, app, app_apikey = get_routers()
@public_app.post('/login', tags=["authentication"])
def login(data: schemas.UserLoginSchema = Body(...)):
if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid captcha."
)
r = users.authenticate(data.email, data.password, for_plugin=False)
if r is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Youve entered invalid Email or Password."
)
if "errors" in r:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail=r["errors"][0]
)
r["smtp"] = helper.has_smtp()
return {
'jwt': r.pop('jwt'),
'data': {
"user": r
}
}
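A client-side sketch of the relocated endpoint; the base URL is a placeholder:

```python
import requests

# Placeholder host; a 401 is raised for bad captcha or credentials.
resp = requests.post("http://localhost:8000/login",
                     json={"email": "user@example.com", "password": "secret"})
resp.raise_for_status()
body = resp.json()
jwt, user = body["jwt"], body["data"]["user"]  # user carries the "smtp" flag
```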
@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
@ -107,10 +136,12 @@ def events_search(projectId: int, q: str,
type: Union[schemas.FilterType, schemas.EventType,
schemas.PerformanceEventType, schemas.FetchFilterType,
schemas.GraphqlFilterType] = None,
key: str = None,
source: str = None, context: schemas.CurrentContext = Depends(OR_context)):
key: str = None, source: str = None, live: bool = False,
context: schemas.CurrentContext = Depends(OR_context)):
if len(q) == 0:
return {"data": []}
if live:
return assist.autocomplete(project_id=projectId, q=q, key=key)
if type in [schemas.FetchFilterType._url]:
type = schemas.EventType.request
elif type in [schemas.GraphqlFilterType._name]:
@ -743,8 +774,8 @@ def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.
@app.get('/{projectId}/funnels/issues/{issueId}/sessions', tags=["funnels"])
def get_issue_sessions(projectId: int, issueId: str, startDate: int = None, endDate: int = None,
context: schemas.CurrentContext = Depends(OR_context)):
def get_funnel_issue_sessions(projectId: int, issueId: str, startDate: int = None, endDate: int = None,
context: schemas.CurrentContext = Depends(OR_context)):
issue = issues.get(project_id=projectId, issue_id=issueId)
if issue is None:
return {"errors": ["issue not found"]}
@ -830,7 +861,14 @@ def all_issue_types(context: schemas.CurrentContext = Depends(OR_context)):
@app.get('/{projectId}/assist/sessions', tags=["assist"])
def sessions_live(projectId: int, userId: str = None, context: schemas.CurrentContext = Depends(OR_context)):
data = assist.get_live_sessions_ws(projectId, user_id=userId)
data = assist.get_live_sessions_ws_user_id(projectId, user_id=userId)
return {'data': data}
@app.post('/{projectId}/assist/sessions', tags=["assist"])
def sessions_live(projectId: int, data: schemas.LiveSessionsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = assist.get_live_sessions_ws(projectId, body=data)
return {'data': data}
@ -903,7 +941,7 @@ def edit_client(data: schemas.UpdateTenantSchema = Body(...),
@app.post('/{projectId}/errors/search', tags=['errors'])
def errors_search(projectId: int, data: schemas.SearchErrorsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return errors.search(data, projectId, user_id=context.user_id)
return {"data": errors.search(data, projectId, user_id=context.user_id)}
@app.get('/{projectId}/errors/stats', tags=['errors'])
@ -966,6 +1004,11 @@ def get_notifications(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)}
@app.get('/notifications/count', tags=['notifications'])
def get_notifications_count(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.get_all_count(tenant_id=context.tenant_id, user_id=context.user_id)}
@app.get('/notifications/{notificationId}/view', tags=['notifications'])
def view_notifications(notificationId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.view_notification(notification_ids=[notificationId], user_id=context.user_id)}
@ -1071,17 +1114,10 @@ def generate_new_user_token(context: schemas.CurrentContext = Depends(OR_context
@app.put('/account', tags=["account"])
def edit_account(data: schemas.EditUserSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data.dict(),
return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data,
editor_id=context.user_id)
@app.post('/account/appearance', tags=["account"])
@app.put('/account/appearance', tags=["account"])
def edit_account_appearance(data: schemas.EditUserAppearanceSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit_appearance(tenant_id=context.tenant_id, user_id=context.user_id, changes=data.dict())
@app.post('/account/password', tags=["account"])
@app.put('/account/password', tags=["account"])
def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),


@ -1,17 +1,15 @@
from typing import Optional
from decouple import config
from fastapi import Body, Depends, HTTPException, status, BackgroundTasks
from fastapi import Body, Depends, BackgroundTasks
from starlette.responses import RedirectResponse
import schemas
from chalicelib.core import assist
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import tenants, users, metadata, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import captcha
from chalicelib.utils import helper
from or_dependencies import OR_context
from routers.base import get_routers
@ -24,60 +22,23 @@ def get_all_signup():
return {"data": {"tenants": tenants.tenants_exists(),
"sso": None,
"ssoProvider": None,
"edition": helper.get_edition()}}
@public_app.post('/login', tags=["authentication"])
def login(data: schemas.UserLoginSchema = Body(...)):
if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid captcha."
)
r = users.authenticate(data.email, data.password, for_plugin=False)
if r is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Youve entered invalid Email or Password."
)
tenant_id = r.pop("tenantId")
r["limits"] = {
"teamMember": -1,
"projects": -1,
"metadata": metadata.get_remaining_metadata_with_count(tenant_id)}
c = tenants.get_by_tenant_id(tenant_id)
c.pop("createdAt")
c["smtp"] = helper.has_smtp()
c["iceServers"] = assist.get_ice_servers()
r["smtp"] = c["smtp"]
r["iceServers"] = c["iceServers"]
return {
'jwt': r.pop('jwt'),
'data': {
"user": r,
"client": c
}
}
"edition": license.EDITION}}
@app.get('/account', tags=['accounts'])
def get_account(context: schemas.CurrentContext = Depends(OR_context)):
r = users.get(tenant_id=context.tenant_id, user_id=context.user_id)
t = tenants.get_by_tenant_id(context.tenant_id)
if t is not None:
t.pop("createdAt")
t["tenantName"] = t.pop("name")
return {
'data': {
**r,
"limits": {
"teamMember": -1,
"projects": -1,
"metadata": metadata.get_remaining_metadata_with_count(context.tenant_id)
},
**t,
**license.get_status(context.tenant_id),
"smtp": helper.has_smtp(),
"iceServers": assist.get_ice_servers()
# "iceServers": assist.get_ice_servers()
}
}
@ -181,7 +142,7 @@ def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema =
@app.post('/client/members/{memberId}', tags=["client"])
def edit_member(memberId: int, data: schemas.EditMemberSchema,
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data.dict(),
return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
user_id_to_update=memberId)
@ -199,29 +160,25 @@ def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] =
m_key=key, project_id=projectId)}
@app.get('/plans', tags=["plan"])
def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)):
return {
"data": license.get_status(context.tenant_id)
}
@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
def get_general_stats():
return {"data": {"sessions:": sessions.count_all()}}
@app.get('/client', tags=['projects'])
def get_client(context: schemas.CurrentContext = Depends(OR_context)):
r = tenants.get_by_tenant_id(context.tenant_id)
if r is not None:
r.pop("createdAt")
return {
'data': r
}
@app.get('/projects', tags=['projects'])
def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
stack_integrations=True)}
@app.get('/limits', tags=['accounts'])
def get_limits(context: schemas.CurrentContext = Depends(OR_context)):
return {
'data': {
"limits": {
"teamMember": -1,
"projects": -1,
"metadata": metadata.get_remaining_metadata_with_count(context.tenant_id)
},
}
}


@ -1,7 +1,7 @@
from fastapi import Body, Depends
import schemas
from chalicelib.core import dashboards, custom_metrics
from chalicelib.core import dashboards, custom_metrics, funnels
from or_dependencies import OR_context
from routers.base import get_routers
@ -102,18 +102,29 @@ def get_templates(projectId: int, context: schemas.CurrentContext = Depends(OR_c
@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"])
def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.merged_live(project_id=projectId, data=data)}
return {"data": custom_metrics.merged_live(project_id=projectId, data=data, user_id=context.user_id)}
@app.post('/{projectId}/metrics/try/sessions', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/try/sessions', tags=["customMetrics"])
def try_custom_metric_sessions(projectId: int,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
def try_custom_metric_sessions(projectId: int, data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.try_sessions(project_id=projectId, user_id=context.user_id, data=data)
return {"data": data}
@app.post('/{projectId}/metrics/try/issues', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/try/issues', tags=["customMetrics"])
def try_custom_metric_funnel_issues(projectId: int, data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
if len(data.series) == 0:
return {"data": []}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data = funnels.get_issues_on_the_fly_widget(project_id=projectId, data=data.series[0].filter)
return {"data": data}
@app.post('/{projectId}/metrics', tags=["dashboard"])
@app.put('/{projectId}/metrics', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics', tags=["customMetrics"])
@ -149,6 +160,42 @@ def get_custom_metric_sessions(projectId: int, metric_id: int,
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/issues', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/issues', tags=["customMetrics"])
def get_custom_metric_funnel_issues(projectId: int, metric_id: int,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_funnel_issues(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/issues/{issueId}/sessions', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/issues/{issueId}/sessions', tags=["customMetrics"])
def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_funnel_sessions_by_issue(project_id=projectId, user_id=context.user_id,
metric_id=metric_id, issue_id=issueId, data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/errors', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/errors', tags=["customMetrics"])
def get_custom_metric_errors_list(projectId: int, metric_id: int,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_errors_list(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/chart', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...),


@ -12,7 +12,7 @@ def attribute_to_camel_case(snake_str):
def transform_email(email: str) -> str:
return email.lower() if isinstance(email, str) else email
return email.lower().strip() if isinstance(email, str) else email
class _Grecaptcha(BaseModel):
@ -37,16 +37,11 @@ class UserSignupSchema(UserLoginSchema):
class EditUserSchema(BaseModel):
name: Optional[str] = Field(None)
email: Optional[EmailStr] = Field(None)
admin: Optional[bool] = Field(False)
appearance: Optional[dict] = Field({})
admin: Optional[bool] = Field(None)
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
class EditUserAppearanceSchema(BaseModel):
appearance: dict = Field(...)
class ForgetPasswordPayloadSchema(_Grecaptcha):
email: EmailStr = Field(...)
@ -132,13 +127,11 @@ class CreateMemberSchema(BaseModel):
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
class EditMemberSchema(BaseModel):
class EditMemberSchema(EditUserSchema):
name: str = Field(...)
email: EmailStr = Field(...)
admin: bool = Field(False)
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
class EditPasswordByInvitationSchema(BaseModel):
invitation: str = Field(...)
@ -486,6 +479,10 @@ class IssueType(str, Enum):
js_exception = 'js_exception'
class MetricFormatType(str, Enum):
session_count = 'sessionCount'
class __MixedSearchFilter(BaseModel):
is_event: bool = Field(...)
@ -618,17 +615,28 @@ class _PaginatedSchema(BaseModel):
page: int = Field(default=1, gt=0)
class SortOrderType(str, Enum):
asc = "ASC"
desc = "DESC"
class SessionsSearchPayloadSchema(_PaginatedSchema):
events: List[_SessionSearchEventSchema] = Field([])
filters: List[SessionSearchFilterSchema] = Field([])
startDate: int = Field(None)
endDate: int = Field(None)
sort: str = Field(default="startTs")
order: Literal["asc", "desc"] = Field(default="desc")
order: SortOrderType = Field(default=SortOrderType.desc)
events_order: Optional[SearchEventOrder] = Field(default=SearchEventOrder._then)
group_by_user: bool = Field(default=False)
bookmarked: bool = Field(default=False)
@root_validator(pre=True)
def transform_order(cls, values):
if values.get("order") is not None:
values["order"] = values["order"].upper()
return values
class Config:
alias_generator = attribute_to_camel_case
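Since the switch from a Literal to SortOrderType is easy to miss, here is a self-contained sketch of the coercion pattern under pydantic v1 (which these schemas appear to target): the pre root_validator upper-cases the incoming value, so a UI that sends lowercase "asc" still validates against the uppercase enum.

```python
from enum import Enum
from pydantic import BaseModel, Field, root_validator  # pydantic v1 API


class SortOrderType(str, Enum):
    asc = "ASC"
    desc = "DESC"


class SearchPayload(BaseModel):
    sort: str = Field(default="startTs")
    order: SortOrderType = Field(default=SortOrderType.desc)

    @root_validator(pre=True)
    def transform_order(cls, values):
        # Runs before field validation, so "asc" becomes "ASC" in time.
        if values.get("order") is not None:
            values["order"] = values["order"].upper()
        return values


print(SearchPayload(order="asc").order)  # SortOrderType.asc
```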
@ -757,8 +765,7 @@ class MobileSignPayloadSchema(BaseModel):
keys: List[str] = Field(...)
class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema):
# class CustomMetricSeriesFilterSchema(SessionsSearchPayloadSchema):
class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema, SearchErrorsSchema):
startDate: Optional[int] = Field(None)
endDate: Optional[int] = Field(None)
sort: Optional[str] = Field(None)
@ -790,6 +797,8 @@ class MetricTableViewType(str, Enum):
class MetricType(str, Enum):
timeseries = "timeseries"
table = "table"
predefined = "predefined"
funnel = "funnel"
class TableMetricOfType(str, Enum):
@ -800,6 +809,8 @@ class TableMetricOfType(str, Enum):
user_id = FilterType.user_id.value
issues = FilterType.issue.value
visited_url = EventType.location.value
sessions = "SESSIONS"
errors = IssueType.js_exception.value
class TimeseriesMetricOfType(str, Enum):
@ -815,7 +826,7 @@ class CustomMetricSessionsPayloadSchema(FlatSessionsSearch, _PaginatedSchema):
alias_generator = attribute_to_camel_case
class CustomMetricChartPayloadSchema(CustomMetricSessionsPayloadSchema):
class CustomMetricChartPayloadSchema(CustomMetricSessionsPayloadSchema, _PaginatedSchema):
density: int = Field(7)
class Config:
@ -830,7 +841,7 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
metric_type: MetricType = Field(MetricType.timeseries)
metric_of: Union[TableMetricOfType, TimeseriesMetricOfType] = Field(TableMetricOfType.user_id)
metric_value: List[IssueType] = Field([])
metric_format: Optional[str] = Field(None)
metric_format: Optional[MetricFormatType] = Field(None)
# metricFraction: float = Field(None, gt=0, lt=1)
# This is used to handle wrong values sent by the UI
@ -863,8 +874,23 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
alias_generator = attribute_to_camel_case
class CustomMetricsConfigSchema(BaseModel):
col: Optional[int] = Field(default=2)
row: Optional[int] = Field(default=2)
position: Optional[int] = Field(default=0)
class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
config: CustomMetricsConfigSchema = Field(default=CustomMetricsConfigSchema())
@root_validator(pre=True)
def transform_series(cls, values):
if values.get("series") is not None and len(values["series"]) > 1 and values.get(
"metric_type") == MetricType.funnel.value:
values["series"] = [values["series"][0]]
return values
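A reduced sketch of the series-trimming rule above: when the incoming metric_type is "funnel", only the first series is kept. The schema names here are stand-ins for the real ones.

```python
from typing import List
from pydantic import BaseModel, root_validator  # pydantic v1 API


class CreateMetric(BaseModel):
    metric_type: str = "timeseries"
    series: List[dict] = []

    @root_validator(pre=True)
    def transform_series(cls, values):
        # Funnel metrics only support a single series; drop the extras.
        if values.get("series") and len(values["series"]) > 1 \
                and values.get("metric_type") == "funnel":
            values["series"] = [values["series"][0]]
        return values


m = CreateMetric(metric_type="funnel", series=[{"name": "a"}, {"name": "b"}])
print(len(m.series))  # 1
```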
class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema):
@ -888,6 +914,7 @@ class SavedSearchSchema(FunnelSchema):
class CreateDashboardSchema(BaseModel):
name: str = Field(..., min_length=1)
description: Optional[str] = Field(default='')
is_public: bool = Field(default=False)
is_pinned: bool = Field(default=False)
metrics: Optional[List[int]] = Field(default=[])
@ -966,6 +993,7 @@ class TemplatePredefinedKeys(str, Enum):
class TemplatePredefinedUnits(str, Enum):
millisecond = "ms"
second = "s"
minute = "min"
memory = "mb"
frame = "f/s"
@ -980,3 +1008,62 @@ class CustomMetricAndTemplate(BaseModel):
class Config:
alias_generator = attribute_to_camel_case
class LiveFilterType(str, Enum):
user_os = FilterType.user_os.value
user_browser = FilterType.user_browser.value
user_device = FilterType.user_device.value
user_country = FilterType.user_country.value
user_id = FilterType.user_id.value
user_anonymous_id = FilterType.user_anonymous_id.value
rev_id = FilterType.rev_id.value
platform = FilterType.platform.value
page_title = "PAGETITLE"
session_id = "SESSIONID"
metadata = "METADATA"
user_UUID = "USERUUID"
tracker_version = "TRACKERVERSION"
user_browser_version = "USERBROWSERVERSION"
user_device_type = "USERDEVICETYPE"
class LiveSessionSearchFilterSchema(BaseModel):
value: Union[List[str], str] = Field(...)
type: LiveFilterType = Field(...)
source: Optional[str] = Field(None)
@root_validator
def validator(cls, values):
if values.get("type") is not None and values["type"] == LiveFilterType.metadata.value:
assert values.get("source") is not None, "source should not be null for METADATA type"
assert len(values.get("source")) > 0, "source should not be empty for METADATA type"
return values
class LiveSessionsSearchPayloadSchema(_PaginatedSchema):
filters: List[LiveSessionSearchFilterSchema] = Field([])
sort: Union[LiveFilterType, str] = Field(default="TIMESTAMP")
order: SortOrderType = Field(default=SortOrderType.desc)
@root_validator(pre=True)
def transform(cls, values):
if values.get("order") is not None:
values["order"] = values["order"].upper()
if values.get("filters") is not None:
i = 0
while i < len(values["filters"]):
if values["filters"][i]["value"] is None or len(values["filters"][i]["value"]) == 0:
del values["filters"][i]
else:
i += 1
for i in values["filters"]:
if i.get("type") == LiveFilterType.platform.value:
i["type"] = LiveFilterType.user_device_type.value
if values.get("sort") is not None:
if values["sort"].lower() == "startts":
values["sort"] = "TIMESTAMP"
return values
class Config:
alias_generator = attribute_to_camel_case
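A condensed sketch of the live-search normalization above, outside the pydantic machinery: filters with missing or empty values are dropped, PLATFORM filters are rewritten to USERDEVICETYPE, and the legacy "startts" sort falls back to TIMESTAMP. Values are illustrative.

```python
def normalize_live_search(values: dict) -> dict:
    # Drop filters whose value is missing or empty (the in-place loop above).
    filters = [f for f in values.get("filters", [])
               if f["value"] not in (None, "", [])]
    # The UI sends PLATFORM; live search stores it as USERDEVICETYPE.
    for f in filters:
        if f.get("type") == "PLATFORM":
            f["type"] = "USERDEVICETYPE"
    values["filters"] = filters
    sort = values.get("sort")
    if sort is not None and sort.lower() == "startts":
        values["sort"] = "TIMESTAMP"
    return values


print(normalize_live_search(
    {"filters": [{"type": "PLATFORM", "value": ["desktop"]},
                 {"type": "USERID", "value": []}],
     "sort": "startTs"}))
```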

backend/.dockerignore Normal file (6 lines)

@ -0,0 +1,6 @@
# ignore .git and .cache folders
.git
.cache
**/build.sh
**/build_*.sh
**/*deploy.sh


@ -10,13 +10,16 @@ RUN go mod download
FROM prepare AS build
COPY cmd cmd
COPY pkg pkg
COPY services services
COPY internal internal
RUN go mod tidy
ARG SERVICE_NAME
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/services/$SERVICE_NAME
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME
FROM alpine
FROM alpine AS entrypoint
RUN apk add --no-cache ca-certificates
ENV TZ=UTC \
@ -28,7 +31,7 @@ ENV TZ=UTC \
BEACON_SIZE_LIMIT=7000000 \
KAFKA_USE_SSL=true \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
REDIS_STREAMS_MAX_LEN=3000 \
REDIS_STREAMS_MAX_LEN=10000 \
TOPIC_RAW_WEB=raw \
TOPIC_RAW_IOS=raw-ios \
TOPIC_CACHE=cache \
@ -39,13 +42,18 @@ ENV TZ=UTC \
GROUP_DB=db \
GROUP_ENDER=ender \
GROUP_CACHE=cache \
GROUP_HEURISTICS=heuristics \
AWS_REGION_WEB=eu-central-1 \
AWS_REGION_IOS=eu-west-1 \
AWS_REGION_ASSETS=eu-central-1 \
CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
FS_CLEAN_HRS=72 \
LOG_QUEUE_STATS_INTERVAL_SEC=60
FILE_SPLIT_SIZE=300000 \
LOG_QUEUE_STATS_INTERVAL_SEC=60 \
BATCH_QUEUE_LIMIT=20 \
BATCH_SIZE_LIMIT=10000000 \
PARTITIONS_NUMBER=1
ARG SERVICE_NAME


@ -1,4 +1,4 @@
FROM golang:1.13-alpine3.10 AS prepare
FROM golang:1.18-alpine3.15 AS prepare
RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
@ -10,13 +10,13 @@ RUN go mod download
FROM prepare AS build
COPY cmd cmd
COPY pkg pkg
COPY services services
COPY internal internal
RUN for name in alerts assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/services/$name; done
RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/cmd/$name; done
FROM alpine
FROM alpine AS entrypoint
#FROM pygmy/alpine-tini:latest
RUN apk add --no-cache ca-certificates
@ -26,8 +26,9 @@ ENV TZ=UTC \
MAXMINDDB_FILE=/root/geoip.mmdb \
UAPARSER_FILE=/root/regexes.yaml \
HTTP_PORT=80 \
BEACON_SIZE_LIMIT=1000000 \
BEACON_SIZE_LIMIT=7000000 \
KAFKA_USE_SSL=true \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
REDIS_STREAMS_MAX_LEN=3000 \
TOPIC_RAW_WEB=raw \
TOPIC_RAW_IOS=raw-ios \
@ -42,10 +43,11 @@ ENV TZ=UTC \
AWS_REGION_WEB=eu-central-1 \
AWS_REGION_IOS=eu-west-1 \
AWS_REGION_ASSETS=eu-central-1 \
CACHE_ASSETS=false \
CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
FS_CLEAN_HRS=12
FS_CLEAN_HRS=12 \
FILE_SPLIT_SIZE=300000 \
LOG_QUEUE_STATS_INTERVAL_SEC=60
RUN mkdir $FS_DIR
#VOLUME [ $FS_DIR ] # Uncomment in case of using Bind mount.

backend/build.sh Normal file → Executable file (28 lines changed)

@ -13,9 +13,19 @@ ee="false"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit=1
exit 1
}
[[ exit -eq 1 ]] && exit 1
return
}
function build_service() {
image="$1"
echo "BUILDING $image"
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
}
return
}
function build_api(){
@ -25,21 +35,15 @@ function build_api(){
ee="true"
}
[[ $2 != "" ]] && {
image="$2"
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
}
build_service $2
return
}
for image in $(ls services);
for image in $(ls cmd);
do
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
}
build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
done
echo "backend build completed"
}
check_prereq


@ -1,45 +1,58 @@
package main
import (
"context"
"log"
"time"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/pkg/env"
"openreplay/backend/internal/assets"
"openreplay/backend/internal/assets/cacher"
config "openreplay/backend/internal/config/assets"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/services/assets/cacher"
)
/*
Assets
*/
func main() {
metrics := monitoring.New("assets")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
GROUP_CACHE := env.String("GROUP_CACHE")
TOPIC_CACHE := env.String("TOPIC_CACHE")
cfg := config.New()
cacher := cacher.NewCacher(
env.String("AWS_REGION"),
env.String("S3_BUCKET_ASSETS"),
env.String("ASSETS_ORIGIN"),
env.Int("ASSETS_SIZE_LIMIT"),
cfg.AWSRegion,
cfg.S3BucketAssets,
cfg.AssetsOrigin,
cfg.AssetsSizeLimit,
)
totalAssets, err := metrics.RegisterCounter("assets_total")
if err != nil {
log.Printf("can't create assets_total metric: %s", err)
}
consumer := queue.NewMessageConsumer(
GROUP_CACHE,
[]string{TOPIC_CACHE},
cfg.GroupCache,
[]string{cfg.TopicCache},
func(sessionID uint64, message messages.Message, e *types.Meta) {
switch msg := message.(type) {
case *messages.AssetCache:
cacher.CacheURL(sessionID, msg.URL)
totalAssets.Add(context.Background(), 1)
case *messages.ErrorEvent:
if msg.Source != "js_exception" {
return
}
sourceList, err := extractJSExceptionSources(&msg.Payload)
sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
if err != nil {
log.Printf("Error on source extraction: %v", err)
return
@ -52,12 +65,12 @@ func main() {
true,
)
tick := time.Tick(20 * time.Minute)
log.Printf("Cacher service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
log.Printf("Cacher service started\n")
tick := time.Tick(20 * time.Minute)
for {
select {
case sig := <-sigchan:
@ -66,6 +79,7 @@ func main() {
os.Exit(0)
case err := <-cacher.Errors:
log.Printf("Error while caching: %v", err)
// TODO: notify user
case <-tick:
cacher.UpdateTimeouts()
default:

backend/cmd/db/main.go Normal file (141 lines)

@ -0,0 +1,141 @@
package main
import (
"errors"
"log"
"openreplay/backend/internal/config/db"
"openreplay/backend/internal/db/datasaver"
"openreplay/backend/pkg/handlers"
custom2 "openreplay/backend/pkg/handlers/custom"
"openreplay/backend/pkg/sessions"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
/*
DB
*/
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := db.New()
// Init database
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres), cfg.ProjectExpirationTimeoutMs)
defer pg.Close()
// handlersFabric returns the list of message handlers we want applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor {
return []handlers.MessageProcessor{
&custom2.EventMapper{},
custom2.NewInputEventBuilder(),
custom2.NewPageEventBuilder(),
}
}
// Create handler's aggregator
builderMap := sessions.NewBuilderMap(handlersFabric)
// Init modules
saver := datasaver.New(pg)
saver.InitStats()
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
// Handler logic
handler := func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
// Just save session data into db without additional checks
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
session, err := pg.GetSession(sessionID)
if session == nil {
if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
// Save statistics to db
err = saver.InsertStats(session, msg)
if err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
}
// Handle heuristics and save to temporary queue in memory
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
// Process saved heuristics messages as usual messages above in the code
builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
// TODO: DRY code (carefully with the return statement logic)
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
return
}
if err := saver.InsertStats(session, msg); err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
})
}
// Init consumer
consumer := queue.NewMessageConsumer(
cfg.GroupDB,
[]string{
cfg.TopicRawWeb,
cfg.TopicAnalytics,
},
handler,
false,
)
log.Printf("Db service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
commitTick := time.Tick(cfg.CommitBatchTimeout)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
consumer.Close()
os.Exit(0)
case <-commitTick:
// Send collected batches to db
pg.CommitBatches()
if err := saver.CommitStats(); err != nil {
log.Printf("Error on stats commit: %v", err)
}
// TODO?: separate stats & regular messages
if err := consumer.Commit(); err != nil {
log.Printf("Error on consumer commit: %v", err)
}
default:
// Handle new message from queue
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err) // TODO: is always fatal?
}
}
}
}
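Worth calling out: the commit tick flushes Postgres batches and stats before committing queue offsets, so a crash in between replays messages rather than dropping them; the IsPkeyViolation check above is what absorbs the resulting duplicate inserts. A schematic of that ordering, with consumer/pg/saver as stand-ins for the objects in main():

```python
import time


def run_commit_loop(consumer, pg, saver, commit_interval_s: float) -> None:
    # consumer.consume_next / consumer.commit / pg.commit_batches /
    # saver.commit_stats are stand-ins mirroring the Go calls above.
    next_commit = time.monotonic() + commit_interval_s
    while True:
        if time.monotonic() >= next_commit:
            pg.commit_batches()   # 1) flush buffered rows to Postgres
            saver.commit_stats()  # 2) flush aggregated stats
            consumer.commit()     # 3) only then advance queue offsets
            next_commit += commit_interval_s
        else:
            consumer.consume_next()  # buffers inserts for the next flush
```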

backend/cmd/ender/main.go Normal file (89 lines)

@ -0,0 +1,89 @@
package main
import (
"log"
"openreplay/backend/internal/config/ender"
"openreplay/backend/internal/sessionender"
"openreplay/backend/pkg/monitoring"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/intervals"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
/*
Ender
*/
func main() {
metrics := monitoring.New("ender")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
// Load service configuration
cfg := ender.New()
// Init all modules
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
if err != nil {
log.Printf("can't init ender service: %s", err)
return
}
producer := queue.NewProducer()
consumer := queue.NewMessageConsumer(
cfg.GroupEnder,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
sessions.UpdateSession(sessionID, meta.Timestamp)
},
false,
)
log.Printf("Ender service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
producer.Close(cfg.ProducerTimeout)
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
log.Printf("can't commit messages with offset: %s", err)
}
consumer.Close()
os.Exit(0)
case <-tick:
// Find ended sessions and send notification to other services
sessions.HandleEndedSessions(func(sessionID uint64, timestamp int64) bool {
msg := &messages.SessionEnd{Timestamp: uint64(timestamp)}
if err := producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(msg)); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
return false
}
return true
})
producer.Flush(cfg.ProducerTimeout)
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
log.Printf("can't commit messages with offset: %s", err)
}
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
}
}
}
}
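Stripped of the Kafka plumbing, the ender tracks a last-seen timestamp per session and emits a SessionEnd once a session has been idle past the timeout. The constant lives in pkg/intervals; the value below is an assumption. A minimal sketch of that pattern:

```python
import time

SESSION_END_TIMEOUT_MS = 2 * 60 * 1000  # assumed value; see intervals.EVENTS_SESSION_END_TIMEOUT

last_seen: dict = {}  # sessionID -> last message timestamp (ms)


def update_session(session_id: int, ts_ms: int) -> None:
    last_seen[session_id] = max(last_seen.get(session_id, 0), ts_ms)


def handle_ended_sessions(produce_session_end) -> None:
    now_ms = int(time.time() * 1000)
    for sid, ts in list(last_seen.items()):
        if now_ms - ts > SESSION_END_TIMEOUT_MS:
            # Mirror the Go callback: only forget the session if producing succeeded.
            if produce_session_end(sid, ts):
                del last_seen[sid]
```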


@ -0,0 +1,97 @@
package main
import (
"log"
"openreplay/backend/internal/config/heuristics"
"openreplay/backend/pkg/handlers"
"openreplay/backend/pkg/handlers/custom"
ios2 "openreplay/backend/pkg/handlers/ios"
web2 "openreplay/backend/pkg/handlers/web"
"openreplay/backend/pkg/intervals"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/sessions"
"os"
"os/signal"
"syscall"
"time"
)
/*
Heuristics
*/
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
// Load service configuration
cfg := heuristics.New()
// handlersFabric returns the list of message handlers we want applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor {
return []handlers.MessageProcessor{
// web handlers
&web2.ClickRageDetector{},
&web2.CpuIssueDetector{},
&web2.DeadClickDetector{},
&web2.MemoryIssueDetector{},
&web2.NetworkIssueDetector{},
&web2.PerformanceAggregator{},
// iOS handlers
&ios2.AppNotResponding{},
&ios2.ClickRageDetector{},
&ios2.PerformanceAggregator{},
// Other handlers (you can add your custom handlers here)
&custom.CustomHandler{},
}
}
// Create handler's aggregator
builderMap := sessions.NewBuilderMap(handlersFabric)
// Init logger
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
// Init producer and consumer for data bus
producer := queue.NewProducer()
consumer := queue.NewMessageConsumer(
cfg.GroupHeuristics,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
},
false,
)
log.Printf("Heuristics service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
producer.Close(cfg.ProducerTimeout)
consumer.Commit()
consumer.Close()
os.Exit(0)
case <-tick:
builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(readyMsg))
})
producer.Flush(cfg.ProducerTimeout)
consumer.Commit()
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
}
}
}
}
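The heuristics worker is essentially a fan-in of detectors: every incoming message is offered to each handler, and whatever events they deem ready are produced to the analytics topic on the next tick. A deliberately naive stand-in for one handler (the real ClickRageDetector in pkg/handlers/web tracks timing and targets, not a bare count):

```python
class ClickRageDetector:
    """Toy handlers.MessageProcessor: flag every N consecutive clicks."""

    def __init__(self, threshold: int = 3):
        self.clicks = 0
        self.ready = []
        self.threshold = threshold

    def handle(self, msg: dict) -> None:
        if msg.get("type") == "click":
            self.clicks += 1
            if self.clicks >= self.threshold:
                self.ready.append({"type": "click_rage", "count": self.clicks})
                self.clicks = 0

    def build(self) -> list:
        # Hand over whatever is ready and reset, like IterateReadyMessages.
        ready, self.ready = self.ready, []
        return ready


d = ClickRageDetector()
for _ in range(3):
    d.handle({"type": "click"})
print(d.build())  # [{'type': 'click_rage', 'count': 3}]
```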

backend/cmd/http/main.go Normal file (71 lines)

@ -0,0 +1,71 @@
package main
import (
"log"
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/router"
"openreplay/backend/internal/http/server"
"openreplay/backend/internal/http/services"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/pprof"
"openreplay/backend/pkg/queue"
)
/*
HTTP
*/
func main() {
metrics := monitoring.New("http")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
pprof.StartProfilingServer()
// Load configuration
cfg := http.New()
// Connect to queue
producer := queue.NewProducer()
defer producer.Close(15000)
// Connect to database
dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres), 1000*60*20)
defer dbConn.Close()
// Build all services
services := services.New(cfg, producer, dbConn)
// Init server's routes
router, err := router.NewRouter(cfg, services, metrics)
if err != nil {
log.Fatalf("failed while creating engine: %s", err)
}
// Init server
server, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
log.Fatalf("failed while creating server: %s", err)
}
// Run server
go func() {
if err := server.Start(); err != nil {
log.Fatalf("Server error: %v\n", err)
}
}()
log.Printf("Server successfully started on port %v\n", cfg.HTTPPort)
// Wait stop signal to shut down server gracefully
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
<-sigchan
log.Printf("Shutting down the server\n")
server.Stop()
}


@ -2,6 +2,8 @@ package main
import (
"log"
config "openreplay/backend/internal/config/integrations"
"openreplay/backend/internal/integrations/clientManager"
"time"
"os"
@ -9,23 +11,25 @@ import (
"syscall"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/env"
"openreplay/backend/pkg/intervals"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/token"
"openreplay/backend/services/integrations/clientManager"
)
/*
Integrations
*/
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
TOPIC_RAW_WEB := env.String("TOPIC_RAW_WEB")
POSTGRES_STRING := env.String("POSTGRES_STRING")
pg := postgres.NewConn(POSTGRES_STRING)
cfg := config.New()
pg := postgres.NewConn(cfg.PostgresURI)
defer pg.Close()
tokenizer := token.NewTokenizer(env.String("TOKEN_SECRET"))
tokenizer := token.NewTokenizer(cfg.TokenSecret)
manager := clientManager.NewManager()
@ -45,7 +49,7 @@ func main() {
producer := queue.NewProducer()
defer producer.Close(15000)
listener, err := postgres.NewIntegrationsListener(POSTGRES_STRING)
listener, err := postgres.NewIntegrationsListener(cfg.PostgresURI)
if err != nil {
log.Printf("Postgres listener error: %v\n", err)
log.Fatalf("Postgres listener error")
@ -81,7 +85,7 @@ func main() {
sessionID = sessData.ID
}
// TODO: send to ready-events topic. Otherwise it has to go through the events worker.
producer.Produce(TOPIC_RAW_WEB, sessionID, messages.Encode(event.RawErrorEvent))
producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(event.RawErrorEvent))
case err := <-manager.Errors:
log.Printf("Integration error: %v\n", err)
case i := <-manager.RequestDataUpdates:

backend/cmd/sink/main.go Normal file (144 lines)

@ -0,0 +1,144 @@
package main
import (
"context"
"encoding/binary"
"log"
"openreplay/backend/internal/sink/assetscache"
"openreplay/backend/internal/sink/oswriter"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/monitoring"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/internal/config/sink"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)
/*
Sink
*/
func main() {
metrics := monitoring.New("sink")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := sink.New()
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
}
writer := oswriter.NewWriter(cfg.FsUlimit, cfg.FsDir)
producer := queue.NewProducer()
defer producer.Close(cfg.ProducerCloseTimeout)
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
assetMessageHandler := assetscache.New(cfg, rewriter, producer)
counter := storage.NewLogCounter()
totalMessages, err := metrics.RegisterCounter("messages_total")
if err != nil {
log.Printf("can't create messages_total metric: %s", err)
}
savedMessages, err := metrics.RegisterCounter("messages_saved")
if err != nil {
log.Printf("can't create messages_saved metric: %s", err)
}
messageSize, err := metrics.RegisterHistogram("messages_size")
if err != nil {
log.Printf("can't create messages_size metric: %s", err)
}
consumer := queue.NewMessageConsumer(
cfg.GroupSink,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, message Message, _ *types.Meta) {
// Process assets
message = assetMessageHandler.ParseAssets(sessionID, message)
totalMessages.Add(context.Background(), 1)
// Filter message
typeID := message.TypeID()
// Send SessionEnd trigger to storage service
switch message.(type) {
case *SessionEnd:
if err := producer.Produce(cfg.TopicTrigger, sessionID, Encode(message)); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
}
return
}
if !IsReplayerType(typeID) {
return
}
// A zero message timestamp is only logged for investigation; otherwise it feeds the last-processed counter below
ts := message.Meta().Timestamp
if ts == 0 {
log.Printf("zero ts; sessID: %d, msg: %+v", sessionID, message)
} else {
// Log ts of last processed message
counter.Update(sessionID, time.UnixMilli(ts))
}
value := message.Encode()
var data []byte
if IsIOSType(typeID) {
data = value
} else {
data = make([]byte, len(value)+8)
copy(data[8:], value[:])
binary.LittleEndian.PutUint64(data[0:], message.Meta().Index)
}
if err := writer.Write(sessionID, data); err != nil {
log.Printf("Writer error: %v\n", err)
}
messageSize.Record(context.Background(), float64(len(data)))
savedMessages.Add(context.Background(), 1)
},
false,
)
log.Printf("Sink service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(30 * time.Second)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
consumer.Close()
os.Exit(0)
case <-tick:
if err := writer.SyncAll(); err != nil {
log.Fatalf("Sync error: %v\n", err)
}
counter.Print()
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
default:
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err)
}
}
}
}
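Note the on-disk framing above: for non-iOS messages the sink prepends an 8-byte little-endian message index (binary.LittleEndian.PutUint64) before the encoded payload, while iOS messages are written raw. A small Python sketch of both sides of that framing, assuming the layout shown here is the whole record:

```python
import struct


def frame(index: int, value: bytes) -> bytes:
    # 8-byte little-endian index, then the encoded message (web messages only).
    return struct.pack("<Q", index) + value


def unframe(data: bytes):
    (index,) = struct.unpack("<Q", data[:8])
    return index, data[8:]


blob = frame(42, b"\x01\x02\x03")
assert unframe(blob) == (42, b"\x01\x02\x03")
```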


@ -0,0 +1,77 @@
package main
import (
"log"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"strconv"
"syscall"
"time"
config "openreplay/backend/internal/config/storage"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
s3storage "openreplay/backend/pkg/storage"
)
/*
Storage
*/
func main() {
metrics := monitoring.New("storage")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New()
s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
srv, err := storage.New(cfg, s3, metrics)
if err != nil {
log.Printf("can't init storage service: %s", err)
return
}
counter := storage.NewLogCounter()
consumer := queue.NewMessageConsumer(
cfg.GroupStorage,
[]string{
cfg.TopicTrigger,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
switch msg.(type) {
case *messages.SessionEnd:
srv.UploadKey(strconv.FormatUint(sessionID, 10), 5)
// Log timestamp of last processed session
counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
}
},
true,
)
log.Printf("Storage service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
counterTick := time.Tick(time.Second * 30)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
consumer.Close()
os.Exit(0)
case <-counterTick:
go counter.Print()
default:
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err)
}
}
}
}

backend/development.md Normal file (14 lines)

@ -0,0 +1,14 @@
### Prerequisites
- [Vagrant](../scripts/vagrant/README.md)
### Building and deploying locally
```bash
cd openreplay-contributions
vagrant ssh
cd openreplay-dev/openreplay/scripts/helmcharts
# For the complete list of options, run:
# bash local_deploy.sh help
bash local_deploy.sh <worker name>
```


@ -4,7 +4,6 @@ go 1.18
require (
cloud.google.com/go/logging v1.4.2
github.com/ClickHouse/clickhouse-go v1.4.3
github.com/aws/aws-sdk-go v1.35.23
github.com/btcsuite/btcutil v1.0.2
github.com/elastic/go-elasticsearch/v7 v7.13.1
@ -19,18 +18,21 @@ require (
github.com/pkg/errors v0.9.1
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420
go.opentelemetry.io/otel/exporters/prometheus v0.30.0
go.opentelemetry.io/otel/metric v0.30.0
golang.org/x/net v0.0.0-20210525063256-abc453219eb5
google.golang.org/api v0.50.0
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0
)
require (
cloud.google.com/go v0.84.0 // indirect
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
@ -42,13 +44,22 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/klauspost/compress v1.11.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v1.7.0 // indirect
go.opentelemetry.io/otel/sdk v1.7.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.30.0 // indirect
go.opentelemetry.io/otel/trace v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.4 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
@ -56,5 +67,5 @@ require (
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
google.golang.org/grpc v1.38.0 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)


@ -44,13 +44,20 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts=
github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@ -62,19 +69,18 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM=
github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@ -95,12 +101,23 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -146,8 +163,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@ -226,11 +245,17 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA=
@ -239,6 +264,8 @@ github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@ -255,7 +282,15 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -263,14 +298,34 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/oschwald/maxminddb-golang v1.7.0 h1:JmU4Q1WBv5Q+2KZy5xJI+98aUwTIrPPxZUkd5Cwr8Zc=
github.com/oschwald/maxminddb-golang v1.7.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
@ -278,8 +333,10 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@ -287,8 +344,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
@ -307,12 +365,25 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -363,6 +434,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -370,6 +442,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -396,8 +469,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -428,6 +502,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -445,6 +520,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -457,6 +533,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -464,18 +542,22 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -666,11 +748,11 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -679,8 +761,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@@ -1,17 +1,15 @@
package main
package assets
import (
"encoding/json"
"strings"
)
type frame struct {
FileName string `json:"fileName"`
}
func extractJSExceptionSources(payload *string) ([]string, error) {
func ExtractJSExceptionSources(payload *string) ([]string, error) {
var frameList []frame
err := json.Unmarshal([]byte(*payload), &frameList)
if err != nil {
@@ -25,8 +23,8 @@ func extractJSExceptionSources(payload *string) ([]string, error) {
fn := strings.Split(f.FileName, "?")[0]
if strings.HasPrefix(fn, "http") && !presentedFileName[fn] {
fileNamesList = append(fileNamesList, f.FileName)
presentedFileName[fn] = true
presentedFileName[fn] = true
}
}
return fileNamesList, nil
}
}
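A quick usage sketch of the renamed helper (illustrative values; the import path is an assumption, since the diff only shows the package name):
package main
import (
    "fmt"
    "openreplay/backend/internal/assets" // assumed path for package assets
)
func main() {
    payload := `[{"fileName":"https://cdn.example.com/app.js?v=1"},{"fileName":"https://cdn.example.com/app.js?v=2"}]`
    files, err := assets.ExtractJSExceptionSources(&payload)
    fmt.Println(files, err)
    // Prints [https://cdn.example.com/app.js?v=1] <nil>: deduplication keys on
    // the URL with its query string stripped, and the first full fileName
    // (query included) is what gets kept.
}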

View file

@@ -0,0 +1,23 @@
package assets
import "openreplay/backend/pkg/env"
type Config struct {
GroupCache string
TopicCache string
AWSRegion string
S3BucketAssets string
AssetsOrigin string
AssetsSizeLimit int
}
func New() *Config {
return &Config{
GroupCache: env.String("GROUP_CACHE"),
TopicCache: env.String("TOPIC_CACHE"),
AWSRegion: env.String("AWS_REGION"),
S3BucketAssets: env.String("S3_BUCKET_ASSETS"),
AssetsOrigin: env.String("ASSETS_ORIGIN"),
AssetsSizeLimit: env.Int("ASSETS_SIZE_LIMIT"),
}
}
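All of the config packages introduced in this diff lean on small helpers from openreplay/backend/pkg/env. A minimal sketch of what those helpers could look like, for orientation only (an assumption — the real package may handle missing variables differently):
package env
import (
    "log"
    "os"
    "strconv"
)
// String returns a required environment variable, failing fast when it is unset.
func String(key string) string {
    v, ok := os.LookupEnv(key)
    if !ok {
        log.Fatalf("missing required env variable: %s", key)
    }
    return v
}
// Int parses a required integer environment variable.
func Int(key string) int {
    n, err := strconv.Atoi(String(key))
    if err != nil {
        log.Fatalf("env variable %s must be an integer: %v", key, err)
    }
    return n
}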

View file

@@ -0,0 +1,32 @@
package db
import (
"openreplay/backend/pkg/env"
"time"
)
type Config struct {
Postgres string
ProjectExpirationTimeoutMs int64
LoggerTimeout int
GroupDB string
TopicRawWeb string
TopicAnalytics string
CommitBatchTimeout time.Duration
BatchQueueLimit int
BatchSizeLimit int
}
func New() *Config {
return &Config{
Postgres: env.String("POSTGRES_STRING"),
ProjectExpirationTimeoutMs: 1000 * 60 * 20,
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
GroupDB: env.String("GROUP_DB"),
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
TopicAnalytics: env.String("TOPIC_ANALYTICS"),
CommitBatchTimeout: 15 * time.Second,
BatchQueueLimit: env.Int("BATCH_QUEUE_LIMIT"),
BatchSizeLimit: env.Int("BATCH_SIZE_LIMIT"),
}
}

View file

@@ -0,0 +1,23 @@
package ender
import (
"openreplay/backend/pkg/env"
)
type Config struct {
GroupEnder string
LoggerTimeout int
TopicRawWeb string
ProducerTimeout int
PartitionsNumber int
}
func New() *Config {
return &Config{
GroupEnder: env.String("GROUP_ENDER"),
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
ProducerTimeout: 2000,
PartitionsNumber: env.Int("PARTITIONS_NUMBER"),
}
}

View file

@@ -0,0 +1,25 @@
package heuristics
import (
"openreplay/backend/pkg/env"
)
type Config struct {
GroupHeuristics string
TopicAnalytics string
LoggerTimeout int
TopicRawWeb string
TopicRawIOS string
ProducerTimeout int
}
func New() *Config {
return &Config{
GroupHeuristics: env.String("GROUP_HEURISTICS"),
TopicAnalytics: env.String("TOPIC_ANALYTICS"),
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
ProducerTimeout: 2000,
}
}

View file

@@ -0,0 +1,44 @@
package http
import (
"openreplay/backend/pkg/env"
"time"
)
type Config struct {
HTTPHost string
HTTPPort string
HTTPTimeout time.Duration
TopicRawWeb string
TopicRawIOS string
BeaconSizeLimit int64
JsonSizeLimit int64
FileSizeLimit int64
AWSRegion string
S3BucketIOSImages string
Postgres string
TokenSecret string
UAParserFile string
MaxMinDBFile string
WorkerID uint16
}
func New() *Config {
return &Config{
HTTPHost: "", // empty by default
HTTPPort: env.String("HTTP_PORT"),
HTTPTimeout: time.Second * 60,
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
BeaconSizeLimit: int64(env.Uint64("BEACON_SIZE_LIMIT")),
JsonSizeLimit: 1e3, // 1Kb
FileSizeLimit: 1e7, // 10Mb
AWSRegion: env.String("AWS_REGION"),
S3BucketIOSImages: env.String("S3_BUCKET_IOS_IMAGES"),
Postgres: env.String("POSTGRES_STRING"),
TokenSecret: env.String("TOKEN_SECRET"),
UAParserFile: env.String("UAPARSER_FILE"),
MaxMinDBFile: env.String("MAXMINDDB_FILE"),
WorkerID: env.WorkerID(),
}
}

View file

@@ -0,0 +1,17 @@
package integrations
import "openreplay/backend/pkg/env"
type Config struct {
TopicRawWeb string
PostgresURI string
TokenSecret string
}
func New() *Config {
return &Config{
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
PostgresURI: env.String("POSTGRES_STRING"),
TokenSecret: env.String("TOKEN_SECRET"),
}
}

View file

@@ -0,0 +1,33 @@
package sink
import (
"openreplay/backend/pkg/env"
)
type Config struct {
FsDir string
FsUlimit uint16
GroupSink string
TopicRawWeb string
TopicRawIOS string
TopicCache string
TopicTrigger string
CacheAssets bool
AssetsOrigin string
ProducerCloseTimeout int
}
func New() *Config {
return &Config{
FsDir: env.String("FS_DIR"),
FsUlimit: env.Uint16("FS_ULIMIT"),
GroupSink: env.String("GROUP_SINK"),
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
TopicCache: env.String("TOPIC_CACHE"),
TopicTrigger: env.String("TOPIC_TRIGGER"),
CacheAssets: env.Bool("CACHE_ASSETS"),
AssetsOrigin: env.String("ASSETS_ORIGIN"),
ProducerCloseTimeout: 15000,
}
}

View file

@@ -0,0 +1,32 @@
package storage
import (
"openreplay/backend/pkg/env"
"time"
)
type Config struct {
S3Region string
S3Bucket string
FSDir string
FSCleanHRS int
FileSplitSize int
RetryTimeout time.Duration
GroupStorage string
TopicTrigger string
DeleteTimeout time.Duration
}
func New() *Config {
return &Config{
S3Region: env.String("AWS_REGION_WEB"),
S3Bucket: env.String("S3_BUCKET_WEB"),
FSDir: env.String("FS_DIR"),
FSCleanHRS: env.Int("FS_CLEAN_HRS"),
FileSplitSize: env.Int("FILE_SPLIT_SIZE"),
RetryTimeout: 2 * time.Minute,
GroupStorage: env.String("GROUP_STORAGE"),
TopicTrigger: env.String("TOPIC_TRIGGER"),
DeleteTimeout: 48 * time.Hour,
}
}

View file

@@ -0,0 +1,80 @@
package datasaver
import (
"fmt"
. "openreplay/backend/pkg/messages"
)
func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
switch m := msg.(type) {
// Common
case *Metadata:
if err := mi.pg.InsertMetadata(sessionID, m); err != nil {
return fmt.Errorf("insert metadata err: %s", err)
}
return nil
case *IssueEvent:
return mi.pg.InsertIssueEvent(sessionID, m)
//TODO: message adapter (transformer) (at the level of pkg/message) for types: *IOSMetadata, *IOSIssueEvent and others
// Web
case *SessionStart:
return mi.pg.InsertWebSessionStart(sessionID, m)
case *SessionEnd:
return mi.pg.InsertWebSessionEnd(sessionID, m)
case *UserID:
return mi.pg.InsertWebUserID(sessionID, m)
case *UserAnonymousID:
return mi.pg.InsertWebUserAnonymousID(sessionID, m)
case *CustomEvent:
return mi.pg.InsertWebCustomEvent(sessionID, m)
case *ClickEvent:
return mi.pg.InsertWebClickEvent(sessionID, m)
case *InputEvent:
return mi.pg.InsertWebInputEvent(sessionID, m)
// Unique Web messages
case *PageEvent:
return mi.pg.InsertWebPageEvent(sessionID, m)
case *ErrorEvent:
return mi.pg.InsertWebErrorEvent(sessionID, m)
case *FetchEvent:
return mi.pg.InsertWebFetchEvent(sessionID, m)
case *GraphQLEvent:
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
// IOS
case *IOSSessionStart:
return mi.pg.InsertIOSSessionStart(sessionID, m)
case *IOSSessionEnd:
return mi.pg.InsertIOSSessionEnd(sessionID, m)
case *IOSUserID:
return mi.pg.InsertIOSUserID(sessionID, m)
case *IOSUserAnonymousID:
return mi.pg.InsertIOSUserAnonymousID(sessionID, m)
case *IOSCustomEvent:
return mi.pg.InsertIOSCustomEvent(sessionID, m)
case *IOSClickEvent:
return mi.pg.InsertIOSClickEvent(sessionID, m)
case *IOSInputEvent:
return mi.pg.InsertIOSInputEvent(sessionID, m)
// Unique IOS messages
case *IOSNetworkCall:
return mi.pg.InsertIOSNetworkCall(sessionID, m)
case *IOSScreenEnter:
return mi.pg.InsertIOSScreenEnter(sessionID, m)
case *IOSCrash:
return mi.pg.InsertIOSCrash(sessionID, m)
case *RawErrorEvent:
return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{
MessageID: m.Meta().Index, // TODO: is it possible to catch panic here???
Timestamp: m.Timestamp,
Source: m.Source,
Name: m.Name,
Message: m.Message,
Payload: m.Payload,
})
}
return nil // "Not implemented"
}
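How the dispatcher above would typically be driven (a sketch: consumeLoop is a hypothetical stand-in for the worker's queue-consumer wiring, which this diff does not show):
saver := datasaver.New(pgCache) // assumes an initialized *cache.PGCache
consumeLoop(func(sessionID uint64, msg messages.Message) {
    if err := saver.InsertMessage(sessionID, msg); err != nil {
        log.Printf("insert failed for session %d: %v", sessionID, err)
    }
})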

View file

@@ -0,0 +1,11 @@
package datasaver
import "openreplay/backend/pkg/db/cache"
type Saver struct {
pg *cache.PGCache
}
func New(pg *cache.PGCache) *Saver {
return &Saver{pg: pg}
}

View file

@@ -0,0 +1,27 @@
package datasaver
import (
. "openreplay/backend/pkg/db/types"
. "openreplay/backend/pkg/messages"
)
func (si *Saver) InitStats() {
// noop
}
func (si *Saver) InsertStats(session *Session, msg Message) error {
switch m := msg.(type) {
// Web
case *PerformanceTrackAggr:
return si.pg.InsertWebStatsPerformance(session.SessionID, m)
case *ResourceEvent:
return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
case *LongTask:
return si.pg.InsertWebStatsLongtask(session.SessionID, m)
}
return nil
}
func (si *Saver) CommitStats() error {
return nil
}

View file

@@ -0,0 +1,138 @@
package ios
import (
"strings"
)
func MapIOSDevice(identifier string) string {
switch identifier {
case "iPod5,1":
return "iPod touch (5th generation)"
case "iPod7,1":
return "iPod touch (6th generation)"
case "iPod9,1":
return "iPod touch (7th generation)"
case "iPhone3,1", "iPhone3,2", "iPhone3,3":
return "iPhone 4"
case "iPhone4,1":
return "iPhone 4s"
case "iPhone5,1", "iPhone5,2":
return "iPhone 5"
case "iPhone5,3", "iPhone5,4":
return "iPhone 5c"
case "iPhone6,1", "iPhone6,2":
return "iPhone 5s"
case "iPhone7,2":
return "iPhone 6"
case "iPhone7,1":
return "iPhone 6 Plus"
case "iPhone8,1":
return "iPhone 6s"
case "iPhone8,2":
return "iPhone 6s Plus"
case "iPhone8,4":
return "iPhone SE"
case "iPhone9,1", "iPhone9,3":
return "iPhone 7"
case "iPhone9,2", "iPhone9,4":
return "iPhone 7 Plus"
case "iPhone10,1", "iPhone10,4":
return "iPhone 8"
case "iPhone10,2", "iPhone10,5":
return "iPhone 8 Plus"
case "iPhone10,3", "iPhone10,6":
return "iPhone X"
case "iPhone11,2":
return "iPhone XS"
case "iPhone11,4", "iPhone11,6":
return "iPhone XS Max"
case "iPhone11,8":
return "iPhone XR"
case "iPhone12,1":
return "iPhone 11"
case "iPhone12,3":
return "iPhone 11 Pro"
case "iPhone12,5":
return "iPhone 11 Pro Max"
case "iPhone12,8":
return "iPhone SE (2nd generation)"
case "iPhone13,1":
return "iPhone 12 mini"
case "iPhone13,2":
return "iPhone 12"
case "iPhone13,3":
return "iPhone 12 Pro"
case "iPhone13,4":
return "iPhone 12 Pro Max"
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":
return "iPad 2"
case "iPad3,1", "iPad3,2", "iPad3,3":
return "iPad (3rd generation)"
case "iPad3,4", "iPad3,5", "iPad3,6":
return "iPad (4th generation)"
case "iPad6,11", "iPad6,12":
return "iPad (5th generation)"
case "iPad7,5", "iPad7,6":
return "iPad (6th generation)"
case "iPad7,11", "iPad7,12":
return "iPad (7th generation)"
case "iPad11,6", "iPad11,7":
return "iPad (8th generation)"
case "iPad4,1", "iPad4,2", "iPad4,3":
return "iPad Air"
case "iPad5,3", "iPad5,4":
return "iPad Air 2"
case "iPad11,3", "iPad11,4":
return "iPad Air (3rd generation)"
case "iPad13,1", "iPad13,2":
return "iPad Air (4th generation)"
case "iPad2,5", "iPad2,6", "iPad2,7":
return "iPad mini"
case "iPad4,4", "iPad4,5", "iPad4,6":
return "iPad mini 2"
case "iPad4,7", "iPad4,8", "iPad4,9":
return "iPad mini 3"
case "iPad5,1", "iPad5,2":
return "iPad mini 4"
case "iPad11,1", "iPad11,2":
return "iPad mini (5th generation)"
case "iPad6,3", "iPad6,4":
return "iPad Pro (9.7-inch)"
case "iPad7,3", "iPad7,4":
return "iPad Pro (10.5-inch)"
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":
return "iPad Pro (11-inch) (1st generation)"
case "iPad8,9", "iPad8,10":
return "iPad Pro (11-inch) (2nd generation)"
case "iPad6,7", "iPad6,8":
return "iPad Pro (12.9-inch) (1st generation)"
case "iPad7,1", "iPad7,2":
return "iPad Pro (12.9-inch) (2nd generation)"
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":
return "iPad Pro (12.9-inch) (3rd generation)"
case "iPad8,11", "iPad8,12":
return "iPad Pro (12.9-inch) (4th generation)"
case "AppleTV5,3":
return "Apple TV"
case "AppleTV6,2":
return "Apple TV 4K"
case "AudioAccessory1,1":
return "HomePod"
case "AudioAccessory5,1":
return "HomePod mini"
case "i386", "x86_64":
return "Simulator"
default:
return identifier
}
}
func GetIOSDeviceType(identifier string) string {
if strings.Contains(identifier, "iPhone") {
return "mobile" //"phone"
}
if strings.Contains(identifier, "iPad") {
return "tablet"
}
return "other"
}
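The mapping in action (the import path matches the one used by the router file below):
package main
import (
    "fmt"
    "openreplay/backend/internal/http/ios"
)
func main() {
    fmt.Println(ios.MapIOSDevice("iPhone13,2"))     // iPhone 12
    fmt.Println(ios.GetIOSDeviceType("iPhone13,2")) // mobile
    fmt.Println(ios.MapIOSDevice("Unknown99,9"))    // unknown identifiers pass through unchanged
}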

View file

@@ -0,0 +1,172 @@
package router
import (
"encoding/json"
"errors"
"log"
"math/rand"
"net/http"
"openreplay/backend/internal/http/ios"
"openreplay/backend/internal/http/uuid"
"strconv"
"time"
"openreplay/backend/pkg/db/postgres"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/token"
)
func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
req := &StartIOSSessionRequest{}
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
defer body.Close()
if err := json.NewDecoder(body).Decode(req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
return
}
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
} else {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
}
return
}
userUUID := uuid.GetUUID(req.UserUUID)
tokenData, err := e.services.Tokenizer.Parse(req.Token)
if err != nil { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
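// Sampling gate: p.SampleRate is treated as a percentage in [0, 100];
// the session is recorded only when the roll lands below it, i.e. roughly
// SampleRate% of new sessions get through. The web handler below applies
// the same gate.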
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
return
}
// TODO: if EXPIRED => send message for two sessions association
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
// The difference with web is mostly here:
e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, Encode(&IOSSessionStart{
Timestamp: req.Timestamp,
ProjectID: uint64(p.ProjectID),
TrackerVersion: req.TrackerVersion,
RevID: req.RevID,
UserUUID: userUUID,
UserOS: "IOS",
UserOSVersion: req.UserOSVersion,
UserDevice: ios.MapIOSDevice(req.UserDevice),
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
UserCountry: country,
}))
}
ResponseWithJSON(w, &StartIOSSessionResponse{
Token: e.services.Tokenizer.Compose(*tokenData),
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
})
}
func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
}
func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil && err != token.EXPIRED {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
// Check timestamps here?
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
}
func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
log.Printf("recieved imagerequest")
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil { // Should accept expired token?
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
defer r.Body.Close()
err = r.ParseMultipartForm(1e6) // ~1Mb
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
ResponseWithError(w, http.StatusUnsupportedMediaType, err)
return
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
} else if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
return
}
if r.MultipartForm == nil {
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
return
}
if len(r.MultipartForm.Value["projectKey"]) == 0 {
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter?
return
}
prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
for _, fileHeaderList := range r.MultipartForm.File {
for _, fileHeader := range fileHeaderList {
file, err := fileHeader.Open()
if err != nil {
continue // TODO: send server error or accumulate successful files
}
key := prefix + fileHeader.Filename
log.Printf("Uploading image... %v", key)
go func() { //TODO: mime type from header
if err := e.services.Storage.Upload(file, key, "image/jpeg", false); err != nil {
log.Printf("Upload ios screen error. %v", err)
}
}()
}
}
w.WriteHeader(http.StatusOK)
}

View file

@@ -0,0 +1,212 @@
package router
import (
"encoding/json"
"errors"
"go.opentelemetry.io/otel/attribute"
"io"
"log"
"math/rand"
"net/http"
"openreplay/backend/internal/http/uuid"
"strconv"
"time"
"openreplay/backend/pkg/db/postgres"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/token"
)
func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
body := http.MaxBytesReader(w, r.Body, limit)
bodyBytes, err := io.ReadAll(body)
if closeErr := body.Close(); closeErr != nil {
log.Printf("error while closing request body: %s", closeErr)
}
if err != nil {
return nil, err
}
reqSize := len(bodyBytes)
e.requestSize.Record(
r.Context(),
float64(reqSize),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
return bodyBytes, nil
}
func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Parse request body
req := &StartSessionRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
return
}
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or capture limit has been reached"))
} else {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
}
return
}
userUUID := uuid.GetUUID(req.UserUUID)
tokenData, err := e.services.Tokenizer.Parse(req.Token)
if err != nil || req.Reset { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
return
}
// TODO: if EXPIRED => send message for two sessions association
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, Encode(&SessionStart{
Timestamp: req.Timestamp,
ProjectID: uint64(p.ProjectID),
TrackerVersion: req.TrackerVersion,
RevID: req.RevID,
UserUUID: userUUID,
UserAgent: r.Header.Get("User-Agent"),
UserOS: ua.OS,
UserOSVersion: ua.OSVersion,
UserBrowser: ua.Browser,
UserBrowserVersion: ua.BrowserVersion,
UserDevice: ua.Device,
UserDeviceType: ua.DeviceType,
UserCountry: e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r),
UserDeviceMemorySize: req.DeviceMemory,
UserDeviceHeapSize: req.JsHeapSizeLimit,
UserID: req.UserID,
}))
}
ResponseWithJSON(w, &StartSessionResponse{
Token: e.services.Tokenizer.Compose(*tokenData),
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
})
}
func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
// Check authorization
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.BeaconSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Send processed messages to queue as array of bytes
// TODO: check bytes for nonsense crap
err = e.services.Producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bodyBytes)
if err != nil {
log.Printf("can't send processed messages to queue: %s", err)
}
w.WriteHeader(http.StatusOK)
}
func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Parse request body
req := &NotStartedRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
err = e.services.Database.InsertUnstartedSession(postgres.UnstartedSession{
ProjectKey: *req.ProjectKey,
TrackerVersion: req.TrackerVersion,
DoNotTrack: req.DoNotTrack,
Platform: "web",
UserAgent: r.Header.Get("User-Agent"),
UserOS: ua.OS,
UserOSVersion: ua.OSVersion,
UserBrowser: ua.Browser,
UserBrowserVersion: ua.BrowserVersion,
UserDevice: ua.Device,
UserDeviceType: ua.DeviceType,
UserCountry: country,
})
if err != nil {
log.Printf("Unable to insert Unstarted Session: %v\n", err)
}
w.WriteHeader(http.StatusOK)
}

View file

@@ -1,28 +1,27 @@
package main
package router
import (
gzip "github.com/klauspost/pgzip"
"io"
"io/ioutil"
"log"
"net/http"
gzip "github.com/klauspost/pgzip"
)
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
defer body.Close()
var reader io.ReadCloser
var err error
switch r.Header.Get("Content-Encoding") {
case "gzip":
log.Println("Gzip", reader)
reader, err = gzip.NewReader(body)
if err != nil {
responseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent responce
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response
return
}
log.Println("Gzip reader init", reader)
@@ -33,9 +32,9 @@ func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topi
log.Println("Reader after switch:", reader)
buf, err := ioutil.ReadAll(reader)
if err != nil {
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
return
}
producer.Produce(topicName, sessionID, buf) // What if not able to send?
e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?
w.WriteHeader(http.StatusOK)
}

View file

@@ -0,0 +1,49 @@
package router
type StartSessionRequest struct {
Token string `json:"token"`
UserUUID *string `json:"userUUID"`
RevID string `json:"revID"`
Timestamp uint64 `json:"timestamp"`
TrackerVersion string `json:"trackerVersion"`
IsSnippet bool `json:"isSnippet"`
DeviceMemory uint64 `json:"deviceMemory"`
JsHeapSizeLimit uint64 `json:"jsHeapSizeLimit"`
ProjectKey *string `json:"projectKey"`
Reset bool `json:"reset"`
UserID string `json:"userID"`
}
type StartSessionResponse struct {
Timestamp int64 `json:"timestamp"`
Delay int64 `json:"delay"`
Token string `json:"token"`
UserUUID string `json:"userUUID"`
SessionID string `json:"sessionID"`
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
}
type NotStartedRequest struct {
ProjectKey *string `json:"projectKey"`
TrackerVersion string `json:"trackerVersion"`
DoNotTrack bool `json:"DoNotTrack"`
}
type StartIOSSessionRequest struct {
Token string `json:"token"`
ProjectKey *string `json:"projectKey"`
TrackerVersion string `json:"trackerVersion"`
RevID string `json:"revID"`
UserUUID *string `json:"userUUID"`
UserOSVersion string `json:"userOSVersion"`
UserDevice string `json:"userDevice"`
Timestamp uint64 `json:"timestamp"`
}
type StartIOSSessionResponse struct {
Token string `json:"token"`
ImagesHashList []string `json:"imagesHashList"`
UserUUID string `json:"userUUID"`
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
SessionID string `json:"sessionID"`
}
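For orientation, a client would kick off a session with a body along these lines (a sketch; all values are made up):
key := "my-project-key"
req := router.StartSessionRequest{
    ProjectKey:     &key,
    TrackerVersion: "3.5.0",
    Timestamp:      1655472000000,
    UserID:         "user-42",
}
body, _ := json.Marshal(req) // POSTed to /v1/web/start, registered in the router below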

View file

@@ -1,4 +1,4 @@
package main
package router
import (
"encoding/json"
@@ -6,7 +6,7 @@ import (
"net/http"
)
func responseWithJSON(w http.ResponseWriter, res interface{}) {
func ResponseWithJSON(w http.ResponseWriter, res interface{}) {
body, err := json.Marshal(res)
if err != nil {
log.Println(err)
@@ -15,10 +15,10 @@ func responseWithJSON(w http.ResponseWriter, res interface{}) {
w.Write(body)
}
func responseWithError(w http.ResponseWriter, code int, err error) {
func ResponseWithError(w http.ResponseWriter, code int, err error) {
type response struct {
Error string `json:"error"`
}
w.WriteHeader(code)
responseWithJSON(w, &response{err.Error()})
ResponseWithJSON(w, &response{err.Error()})
}

View file

@@ -0,0 +1,119 @@
package router
import (
"context"
"fmt"
"github.com/gorilla/mux"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"net/http"
http3 "openreplay/backend/internal/config/http"
http2 "openreplay/backend/internal/http/services"
"openreplay/backend/pkg/monitoring"
"time"
)
type Router struct {
router *mux.Router
cfg *http3.Config
services *http2.ServicesBuilder
requestSize syncfloat64.Histogram
requestDuration syncfloat64.Histogram
totalRequests syncfloat64.Counter
}
func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case services == nil:
return nil, fmt.Errorf("services is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
e := &Router{
cfg: cfg,
services: services,
}
e.initMetrics(metrics)
e.init()
return e, nil
}
func (e *Router) init() {
e.router = mux.NewRouter()
// Root path
e.router.HandleFunc("/", e.root)
handlers := map[string]func(http.ResponseWriter, *http.Request){
"/v1/web/not-started": e.notStartedHandlerWeb,
"/v1/web/start": e.startSessionHandlerWeb,
"/v1/web/i": e.pushMessagesHandlerWeb,
"/v1/ios/start": e.startSessionHandlerIOS,
"/v1/ios/i": e.pushMessagesHandlerIOS,
"/v1/ios/late": e.pushLateMessagesHandlerIOS,
"/v1/ios/images": e.imagesUploadHandlerIOS,
}
prefix := "/ingest"
for path, handler := range handlers {
e.router.HandleFunc(path, handler).Methods("POST", "OPTIONS")
e.router.HandleFunc(prefix+path, handler).Methods("POST", "OPTIONS")
}
// CORS middleware
e.router.Use(e.corsMiddleware)
}
func (e *Router) initMetrics(metrics *monitoring.Metrics) {
var err error
e.requestSize, err = metrics.RegisterHistogram("requests_body_size")
if err != nil {
log.Printf("can't create requests_body_size metric: %s", err)
}
e.requestDuration, err = metrics.RegisterHistogram("requests_duration")
if err != nil {
log.Printf("can't create requests_duration metric: %s", err)
}
e.totalRequests, err = metrics.RegisterCounter("requests_total")
if err != nil {
log.Printf("can't create requests_total metric: %s", err)
}
}
func (e *Router) root(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Prepare headers for preflight requests
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
if r.Method == http.MethodOptions {
w.Header().Set("Cache-Control", "max-age=86400")
w.WriteHeader(http.StatusOK)
return
}
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
requestStart := time.Now()
// Serve request
next.ServeHTTP(w, r)
metricsContext, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
defer cancel() // release the timer once the metrics below are recorded
e.totalRequests.Add(metricsContext, 1)
e.requestDuration.Record(metricsContext,
float64(time.Since(requestStart).Milliseconds()),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
})
}
func (e *Router) GetHandler() http.Handler {
return e.router
}

View file

@@ -0,0 +1,46 @@
package server
import (
"context"
"errors"
"fmt"
"golang.org/x/net/http2"
"log"
"net/http"
"time"
)
type Server struct {
server *http.Server
}
func New(handler http.Handler, host, port string, timeout time.Duration) (*Server, error) {
switch {
case port == "":
return nil, errors.New("empty server port")
case handler == nil:
return nil, errors.New("empty handler")
case timeout < 1:
return nil, fmt.Errorf("invalid timeout %d", timeout)
}
server := &http.Server{
Addr: fmt.Sprintf("%s:%s", host, port),
Handler: handler,
ReadTimeout: timeout,
WriteTimeout: timeout,
}
if err := http2.ConfigureServer(server, nil); err != nil {
log.Printf("can't configure http2 server: %s", err)
}
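// Note: http2.ConfigureServer enables HTTP/2 via TLS/ALPN only; served over
// cleartext with ListenAndServe (as Start does below), connections remain
// HTTP/1.1 unless a TLS-terminating proxy in front negotiates h2.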
return &Server{
server: server,
}, nil
}
func (s *Server) Start() error {
return s.server.ListenAndServe()
}
func (s *Server) Stop() {
if err := s.server.Shutdown(context.Background()); err != nil {
log.Printf("server shutdown error: %s", err)
}
}

View file

@@ -0,0 +1,34 @@
package services
import (
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/uaparser"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/storage"
"openreplay/backend/pkg/token"
)
type ServicesBuilder struct {
Database *cache.PGCache
Producer types.Producer
Flaker *flakeid.Flaker
UaParser *uaparser.UAParser
GeoIP *geoip.GeoIP
Tokenizer *token.Tokenizer
Storage *storage.S3
}
func New(cfg *http.Config, producer types.Producer, pgconn *cache.PGCache) *ServicesBuilder {
return &ServicesBuilder{
Database: pgconn,
Producer: producer,
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages),
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
GeoIP: geoip.NewGeoIP(cfg.MaxMinDBFile),
Flaker: flakeid.NewFlaker(cfg.WorkerID),
}
}
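Taken together with the router and server files above, startup plausibly looks like this (a hypothetical main; producer, pgCache and metrics are assumed to be initialized elsewhere, and the real entry point is not part of this diff):
cfg := http.New()
svc := services.New(cfg, producer, pgCache)
rtr, err := router.NewRouter(cfg, svc, metrics)
if err != nil {
    log.Fatalf("can't init router: %v", err)
}
srv, err := server.New(rtr.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
    log.Fatalf("can't init server: %v", err)
}
log.Fatal(srv.Start())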

View file

@@ -1,10 +1,10 @@
package main
package uuid
import (
"github.com/google/uuid"
)
func getUUID(u *string) string {
func GetUUID(u *string) string {
if u != nil {
_, err := uuid.Parse(*u)
if err == nil {
@@ -12,4 +12,4 @@ func getUUID(u *string) string {
}
}
return uuid.New().String()
}
}

View file

@@ -0,0 +1,49 @@
package clientManager
import (
"openreplay/backend/internal/integrations/integration"
"strconv"
"openreplay/backend/pkg/db/postgres"
)
type manager struct {
clientMap integration.ClientMap
Events chan *integration.SessionErrorEvent
Errors chan error
RequestDataUpdates chan postgres.Integration // not a pointer, since the value could be mutated by another thread
}
func NewManager() *manager {
return &manager{
clientMap: make(integration.ClientMap),
RequestDataUpdates: make(chan postgres.Integration, 100),
Events: make(chan *integration.SessionErrorEvent, 100),
Errors: make(chan error, 100),
}
}
func (m *manager) Update(i *postgres.Integration) error {
key := strconv.Itoa(int(i.ProjectID)) + i.Provider
if i.Options == nil {
delete(m.clientMap, key)
return nil
}
c, exists := m.clientMap[key]
if !exists {
c, err := integration.NewClient(i, m.RequestDataUpdates, m.Events, m.Errors)
if err != nil {
return err
}
m.clientMap[key] = c
return nil
}
return c.Update(i)
}
func (m *manager) RequestAll() {
for _, c := range m.clientMap {
go c.Request()
}
}
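A sketch of how the manager might be driven (integrationUpdates and the ticker are hypothetical; the worker's real event loop is not shown here):
m := clientManager.NewManager()
ticker := time.NewTicker(time.Minute)
for {
    select {
    case i := <-integrationUpdates: // hypothetical chan postgres.Integration
        if err := m.Update(&i); err != nil {
            log.Printf("integration update failed: %v", err)
        }
    case <-ticker.C:
        m.RequestAll() // fans each configured client's Request out in its own goroutine
    }
}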

View file

@@ -2,43 +2,40 @@ package integration
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"strings"
"regexp"
"openreplay/backend/pkg/messages"
"regexp"
"strings"
)
var reIsException = regexp.MustCompile(`(?i)exception|error`)
type cloudwatch struct {
AwsAccessKeyId string // `json:"aws_access_key_id"`
AwsSecretAccessKey string // `json:"aws_secret_access_key"`
LogGroupName string // `json:"log_group_name"`
Region string // `json:"region"`
AwsAccessKeyId string // `json:"aws_access_key_id"`
AwsSecretAccessKey string // `json:"aws_secret_access_key"`
LogGroupName string // `json:"log_group_name"`
Region string // `json:"region"`
}
func (cw *cloudwatch) Request(c *client) error {
startTs := int64(c.getLastMessageTimestamp() + 1) // From next millisecond
startTs := int64(c.getLastMessageTimestamp() + 1) // From next millisecond
//endTs := utils.CurrentTimestamp()
sess, err := session.NewSession(aws.NewConfig().
WithRegion(cw.Region).
WithCredentials(
credentials.NewStaticCredentials(cw.AwsAccessKeyId, cw.AwsSecretAccessKey, ""),
),
WithRegion(cw.Region).
WithCredentials(
credentials.NewStaticCredentials(cw.AwsAccessKeyId, cw.AwsSecretAccessKey, ""),
),
)
if err != nil {
return err
}
svc := cloudwatchlogs.New(sess)
filterOptions := new(cloudwatchlogs.FilterLogEventsInput).
SetStartTime(startTs). // Inclusively both startTime and endTime
SetStartTime(startTs). // Inclusively both startTime and endTime
// SetEndTime(endTs). // Default nil?
// SetLimit(10000). // Default 10000
SetLogGroupName(cw.LogGroupName).
@@ -56,7 +53,7 @@ func (cw *cloudwatch) Request(c *client) error {
}
if !reIsException.MatchString(*e.Message) { // too weak condition ?
continue
}
}
token, err := GetToken(*e.Message)
if err != nil {
c.errChan <- err
@@ -72,18 +69,18 @@ func (cw *cloudwatch) Request(c *client) error {
//SessionID: sessionID,
Token: token,
RawErrorEvent: &messages.RawErrorEvent{
Source: "cloudwatch",
Timestamp: timestamp, // e.IngestionTime ??
Name: name,
Payload: strings.ReplaceAll(e.String(), "\n", ""),
Source: "cloudwatch",
Timestamp: timestamp, // e.IngestionTime ??
Name: name,
Payload: strings.ReplaceAll(e.String(), "\n", ""),
},
}
}
if output.NextToken == nil {
break;
break
}
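// NextToken pagination: re-issue FilterLogEvents with the token from the
// previous page until it comes back nil, draining every event newer than
// the last message timestamp already seen.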
filterOptions.NextToken = output.NextToken
}
return nil
}
}

View file

@@ -53,14 +53,14 @@ func (es *elasticsearch) Request(c *client) error {
"query": map[string]interface{}{
"bool": map[string]interface{}{
"filter": []map[string]interface{}{
map[string]interface{}{
{
"match": map[string]interface{}{
"message": map[string]interface{}{
"query": "openReplaySessionToken=", // asayer_session_id=
},
},
},
map[string]interface{}{
{
"range": map[string]interface{}{
"utc_time": map[string]interface{}{
"gte": strconv.FormatUint(gteTs, 10),
@@ -68,7 +68,7 @@ func (es *elasticsearch) Request(c *client) error {
},
},
},
map[string]interface{}{
{
"term": map[string]interface{}{
"tags": "error",
},

Some files were not shown because too many files have changed in this diff.