Compare commits

ui-events-… vs. main — 1,280 commits

[Commit table (Author | SHA1 | Date) not recoverable: only the truncated SHA links survived extraction, from 90510aa33b at the head of the list through 203791f0f6 at the end; the Author and Date cells are empty for all 1,280 rows.]
3521 changed files with 172863 additions and 132677 deletions
.github/composite-actions/update-keys/action.yml (vendored) — 38 changed lines

```diff
@@ -10,6 +10,9 @@ inputs:
   jwt_secret:
     required: true
     description: 'JWT Secret'
+  jwt_spot_secret:
+    required: true
+    description: 'JWT spot Secret'
   minio_access_key:
     required: true
     description: 'MinIO Access Key'
@@ -36,21 +39,36 @@ runs:
     - name: "Updating OSS secrets"
       run: |
         cd scripts/helmcharts/
-        yq e -i '.global.domainName = strenv(DOMAIN_NAME)' vars.yaml
-        yq e -i '.global.assistKey = strenv(JWT_SECRET)' vars.yaml
-        yq e -i '.global.assistJWTSecret = strenv(JWT_SECRET)' vars.yaml
-        yq e -i '.chalice.env.jwt_secret = strenv(JWT_SECRET)' vars.yaml
-        yq e -i '.global.enterpriseEditionLicense = strenv(LICENSE_KEY)' vars.yaml
-        yq e -i '.global.s3.accessKey = strenv(MINIO_ACCESS_KEY)' vars.yaml
-        yq e -i '.global.s3.secretKey = strenv(MINIO_SECRET_KEY)' vars.yaml
-        yq e -i '.postgresql.postgresqlPassword = strenv(PG_PASSWORD)' vars.yaml
-        yq e -i '.global.openReplayContainerRegistry = strenv(REGISTRY_URL)' vars.yaml
+        vars=(
+          "ASSIST_JWT_SECRET:.global.assistJWTSecret"
+          "ASSIST_KEY:.global.assistKey"
+          "DOMAIN_NAME:.global.domainName"
+          "JWT_REFRESH_SECRET:.chalice.env.JWT_REFRESH_SECRET"
+          "JWT_SECRET:.global.jwtSecret"
+          "JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
+          "JWT_SPOT_SECRET:.global.jwtSpotSecret"
+          "LICENSE_KEY:.global.enterpriseEditionLicense"
+          "MINIO_ACCESS_KEY:.global.s3.accessKey"
+          "MINIO_SECRET_KEY:.global.s3.secretKey"
+          "PG_PASSWORD:.postgresql.postgresqlPassword"
+          "REGISTRY_URL:.global.openReplayContainerRegistry"
+        )
+        for var in "${vars[@]}"; do
+          IFS=":" read -r env_var yq_path <<<"$var"
+          yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
+        done
       shell: bash
       env:
+        ASSIST_JWT_SECRET: ${{ inputs.assist_jwt_secret }}
+        ASSIST_KEY: ${{ inputs.assist_key }}
         DOMAIN_NAME: ${{ inputs.domain_name }}
-        LICENSE_KEY: ${{ inputs.license_key }}
+        JWT_REFRESH_SECRET: ${{ inputs.jwt_refresh_secret }}
         JWT_SECRET: ${{ inputs.jwt_secret }}
+        JWT_SPOT_REFRESH_SECRET: ${{inputs.jwt_spot_refresh_secret}}
+        JWT_SPOT_SECRET: ${{ inputs.jwt_spot_secret }}
+        LICENSE_KEY: ${{ inputs.license_key }}
         MINIO_ACCESS_KEY: ${{ inputs.minio_access_key }}
         MINIO_SECRET_KEY: ${{ inputs.minio_secret_key }}
         PG_PASSWORD: ${{ inputs.pg_password }}
         REGISTRY_URL: ${{ inputs.registry_url }}
```
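The core of this change replaces nine hand-maintained `yq e -i` calls with one table-driven loop. Below is a minimal standalone sketch of the idiom, assuming mikefarah's yq v4 (where `strenv()` reads exported environment variables); the sample values and the local stand-in for `vars.yaml` are hypothetical.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical sample values; in CI these come from the step's `env:` block.
export DOMAIN_NAME="openreplay.example.com"
export PG_PASSWORD="changeme"

echo 'global: {}' > vars.yaml   # stand-in for scripts/helmcharts/vars.yaml

# "ENV_VAR:yq-path" pairs, as in the diff above.
vars=(
  "DOMAIN_NAME:.global.domainName"
  "PG_PASSWORD:.postgresql.postgresqlPassword"
)

for var in "${vars[@]}"; do
  # Split on the first ':' — env var name on the left, yq path on the right.
  IFS=":" read -r env_var yq_path <<<"$var"
  # strenv() injects the exported variable as a YAML string, in place.
  yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done
```

Adding a new secret now means adding one array entry plus one `env:` line, instead of a new `yq` invocation.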
.github/workflows/alerts-ee.yaml (vendored) — 11 changed lines

```diff
@@ -43,9 +43,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -78,10 +83,10 @@
           cd api
           PUSH_IMAGE=0 bash -x ./build_alerts.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("alerts")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
```
.github/workflows/alerts.yaml (vendored) — 11 changed lines

```diff
@@ -36,9 +36,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.OSS_LICENSE_KEY }}
           minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -71,10 +76,10 @@
           cd api
           PUSH_IMAGE=0 bash -x ./build_alerts.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("alerts")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
```
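Every build job gets the same two-part scan fix: Trivy is bumped from 0.34.0 to 0.56.2, and the vulnerability DB is pulled from explicitly named mirrors (GHCR first, AWS ECR Public as a fallback) instead of the default registry. A sketch of the resulting step in isolation — all flags are copied from the diff; `DOCKER_REPO` and `IMAGE_TAG` are assumed to be provided by the job environment:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Download a pinned Trivy release into the working directory.
TRIVY_VERSION=0.56.2
curl -L "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz" \
  | tar -xzf - -C ./

images=("alerts")
for image in "${images[@]}"; do
  # --exit-code 1 fails the build on unignored HIGH/CRITICAL findings;
  # the two --db-repository flags name explicit mirrors for the vuln DB.
  ./trivy image \
    --db-repository ghcr.io/aquasecurity/trivy-db:2 \
    --db-repository public.ecr.aws/aquasecurity/trivy-db:2 \
    --exit-code 1 --security-checks vuln --vuln-type os,library \
    --severity "HIGH,CRITICAL" --ignore-unfixed \
    "$DOCKER_REPO/$image:$IMAGE_TAG"
done
```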
.github/workflows/api-ee.yaml (vendored) — 13 changed lines

```diff
@@ -10,8 +10,6 @@ on:
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
-      - actions_test
     paths:
       - "ee/api/**"
       - "api/**"
@@ -42,9 +40,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -77,10 +80,10 @@
           cd api
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("chalice")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
```
.github/workflows/api.yaml (vendored) — 30 changed lines

```diff
@@ -10,7 +10,6 @@ on:
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
     paths:
       - "api/**"
       - "!api/.gitignore"
@@ -35,9 +34,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.OSS_LICENSE_KEY }}
           minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -70,10 +74,10 @@
           cd api
           PUSH_IMAGE=0 bash -x ./build.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("chalice")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
@@ -135,12 +139,12 @@
           SLACK_USERNAME: "OR Bot"
           SLACK_MESSAGE: "Build failed :bomb:"

-      # - name: Debug Job
-      # # if: ${{ failure() }}
-      # uses: mxschmitt/action-tmate@v3
-      # env:
-      #   DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #   IMAGE_TAG: ${{ github.sha }}-ee
-      #   ENVIRONMENT: staging
-      # with:
-      #   iimit-access-to-actor: true
+      # - name: Debug Job
+      #   if: ${{ failure() }}
+      #   uses: mxschmitt/action-tmate@v3
+      #   env:
+      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+      #     IMAGE_TAG: ${{ github.sha }}-ee
+      #     ENVIRONMENT: staging
+      #   with:
+      #     limit-access-to-actor: true
```
.github/workflows/assist-ee.yaml (vendored) — 12 changed lines

```diff
@@ -9,7 +9,6 @@ on:
   push:
     branches:
       - dev
-      - api-*
    paths:
      - "ee/assist/**"
      - "assist/**"
@@ -33,9 +32,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -63,10 +67,10 @@
           cd assist
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("assist")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
```
.github/workflows/… (vendored) — file name not captured; the "Build and Deploy Peers EE" workflow, repurposed for Assist-Server

```diff
@@ -1,4 +1,4 @@
-# This action will push the peers changes to aws
+# This action will push the assist changes to aws
 on:
   workflow_dispatch:
     inputs:
@@ -9,14 +9,10 @@ on:
   push:
     branches:
       - dev
-      - api-*
     paths:
-      - "ee/peers/**"
-      - "peers/**"
-      - "!peers/.gitignore"
-      - "!peers/*-dev.sh"
+      - "ee/assist-server/**"

-name: Build and Deploy Peers EE
+name: Build and Deploy Assist-Server EE

 jobs:
   deploy:
@@ -33,9 +29,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -52,12 +53,7 @@
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

-      # Caching docker images
-      - uses: satackey/action-docker-layer-caching@v0.0.11
-        # Ignore the failure of a step and avoid terminating the job.
-        continue-on-error: true

-      - name: Building and Pushing peers image
+      - name: Building and Pushing Assist-Server image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
@@ -65,13 +61,13 @@
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-          cd peers
+          cd assist-server
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
-            images=("peers")
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+            images=("assist-server")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
@@ -80,7 +76,7 @@
           } && {
             echo "Skipping Security Checks"
           }
-          images=("peers")
+          images=("assist-server")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
@@ -104,43 +100,23 @@
               tag: `echo ${image_array[1]} | cut -d '-' -f 1`
           EOF
           done

       - name: Deploy to kubernetes
         run: |
           pwd
           cd scripts/helmcharts/

           # Update changed image tag
-          sed -i "/peers/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+          sed -i "/assist-server/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
           mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
           rm -rf openreplay/charts/*
           mv /tmp/charts/* openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
           # We're not passing -ee flag, because helm will add that.
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
         if: ${{ failure() }}
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_CHANNEL: ee
           SLACK_TITLE: "Failed ${{ github.workflow }}"
           SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
           SLACK_USERNAME: "OR Bot"
           SLACK_MESSAGE: "Build failed :bomb:"

-      # - name: Debug Job
-      # # if: ${{ failure() }}
-      # uses: mxschmitt/action-tmate@v3
-      # env:
-      #   DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #   IMAGE_TAG: ${{ github.sha }}-ee
-      #   ENVIRONMENT: staging
-      # with:
-      #   iimit-access-to-actor: true
```
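The deploy step keeps the positional `sed` trick for patching the image tag: match the service name, advance three lines, and overwrite whatever is there. A self-contained demonstration follows; the sample file content and tag value are hypothetical, but the file shape mirrors what the "Creating old image input" step generates (name, `image:`, a comment, then `tag:`).

```bash
#!/usr/bin/env bash
set -euo pipefail

# Sample override file in the exact four-line shape the sed command relies on.
cat > /tmp/image_override.yaml <<'EOF'
assist-server:
  image:
    # We've to strip off the -ee, as helm will append it.
    tag: old-tag
EOF

IMAGE_TAG="v1.22.0_abc123"   # hypothetical tag
# /assist-server/ finds the service block; each `n` advances one line,
# so s/.*/…/ rewrites the third line after the match — the "tag:" line.
sed -i "/assist-server/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
```

This only works while the generated file keeps exactly that shape (drop the comment and `sed` would clobber the wrong line), which is presumably why the assist-stats workflow below swaps the same edit for a structured `yq` path update.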
.github/workflows/assist-stats.yaml (vendored) — 23 changed lines

```diff
@@ -15,7 +15,7 @@
       - "!assist-stats/*-dev.sh"
       - "!assist-stats/requirements-*.txt"

-name: Build and Deploy Assist Stats
+name: Build and Deploy Assist Stats ee

 jobs:
   deploy:
@@ -32,9 +32,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.OSS_LICENSE_KEY }}
           minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -67,10 +72,10 @@
           cd assist-stats
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("assist-stats")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
@@ -94,9 +99,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -113,8 +123,9 @@
               tag: ${IMAGE_TAG}
           EOF

+          export IMAGE_TAG=${IMAGE_TAG}
           # Update changed image tag
-          sed -i "/assist-stats/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+          yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
```
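Here the positional `sed` edit is swapped for a `yq` path assignment, which keeps working even if comments or line order in the override file change. A short sketch with a hypothetical override file and tag, assuming mikefarah's yq v4:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical override file; only the YAML path matters, not line positions.
cat > /tmp/image_override.yaml <<'EOF'
utilities:
  apiCrons:
    assiststats:
      image:
        tag: old-tag
EOF

export IMAGE_TAG="v1.22.0_abc123"   # strenv() requires the variable to be exported
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
cat /tmp/image_override.yaml
```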
.github/workflows/assist.yaml (vendored) — 12 changed lines

```diff
@@ -9,7 +9,6 @@
   push:
     branches:
       - dev
-      - api-*
     paths:
       - "assist/**"
       - "!assist/.gitignore"
@@ -32,9 +31,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.OSS_LICENSE_KEY }}
           minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -62,10 +66,10 @@
           cd assist
           PUSH_IMAGE=0 bash -x ./build.sh
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("assist")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
```
.github/workflows/crons-ee.yaml (vendored) — 53 changed lines

```diff
@@ -10,7 +10,6 @@
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
     paths:
       - "ee/api/**"
       - "api/**"
@@ -44,9 +43,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -79,10 +83,10 @@
           cd api
           PUSH_IMAGE=0 bash -x ./build_crons.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
-            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
             images=("crons")
             for image in ${images[*]};do
-              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done
             err_code=$?
             [[ $err_code -ne 0 ]] && {
@@ -96,33 +100,32 @@
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
+        env:
+          # We're not passing -ee flag, because helm will add that.
+          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         run: |
-          #
-          # Create yaml with existing image tags
-          #
-          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
-            tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
-
-          echo > /tmp/image_override.yaml
-
-          for line in `cat /tmp/image_tag.txt`;
-          do
-            image_array=($(echo "$line" | tr ':' '\n'))
-            cat <<EOF >> /tmp/image_override.yaml
-          ${image_array[0]}:
-            image:
-              # We've to strip off the -ee, as helm will append it.
-              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
+          cd scripts/helmcharts/
+          cat <<EOF>/tmp/image_override.yaml
+          image: &image
+            tag: "${IMAGE_TAG}"
+          utilities:
+            apiCrons:
+              assiststats:
+                image: *image
+              report:
+                image: *image
+              sessionsCleaner:
+                image: *image
+              projectsStats:
+                image: *image
+              fixProjectsStats:
+                image: *image
           EOF
-          done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

-          # Update changed image tag
-          sed -i "/crons/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
-
           cat /tmp/image_override.yaml
           # Deploy command
           mkdir -p /tmp/charts
@@ -132,8 +135,6 @@
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-          # We're not passing -ee flag, because helm will add that.
-          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
```
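Instead of scraping current image tags out of the cluster with `kubectl`, the rewritten step writes the override file directly, using a YAML anchor so every cron image shares one tag definition. A sketch that generates a reduced version of the file and verifies the aliases resolve (hypothetical tag; assuming mikefarah's yq v4):

```bash
#!/usr/bin/env bash
set -euo pipefail

IMAGE_TAG="v1.22.0_abc123"   # hypothetical; CI derives it from ref_name + sha
cat <<EOF >/tmp/image_override.yaml
image: &image
  tag: "${IMAGE_TAG}"
utilities:
  apiCrons:
    assiststats:
      image: *image
    report:
      image: *image
    sessionsCleaner:
      image: *image
EOF

# yq follows the *image aliases, so each cron reports the shared tag.
yq '.utilities.apiCrons[].image.tag' /tmp/image_override.yaml
```

Changing the tag in one place now updates all crons at once, so the later `sed -i "/crons/…"` patch in the deploy step becomes unnecessary and is dropped.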
.github/workflows/frontend-dev.yaml (vendored) — 2 changed lines

```diff
@@ -52,7 +52,7 @@
           set -x
           cd frontend
           mv .env.sample .env
-          docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
+          docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
           # https://github.com/docker/cli/issues/1134#issuecomment-613516912
           DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
           docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
```
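The build still runs yarn inside a throwaway container, mounting `/etc/passwd` and running as the host UID/GID so files created by yarn are owned by the CI user rather than root; only the image moves from `node:14-stretch-slim` to `node:20-slim`. The same command as a standalone script, copied from the diff:

```bash
#!/usr/bin/env bash
set -euo pipefail

cd frontend
# Run the build as the host user inside node:20-slim; the /etc/passwd mount
# lets the container resolve that UID to a user with a home directory.
docker run --rm \
  -v /etc/passwd:/etc/passwd \
  -u "$(id -u):$(id -g)" \
  -v "$(pwd):/home/${USER}" -w "/home/${USER}" \
  --name node_build node:20-slim \
  /bin/bash -c "yarn && yarn build"
```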
.github/workflows/frontend.yaml (vendored) — 24 changed lines

```diff
@@ -19,19 +19,26 @@
         uses: actions/checkout@v2

       - name: Cache node modules
-        uses: actions/cache@v1
+        uses: actions/cache@v4
         with:
-          path: node_modules
-          key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
+          path: |
+            /home/runner/work/openreplay/openreplay/frontend/node_modules
+            /home/runner/work/openreplay/openreplay/frontend/.yarn
+          key: ${{ runner.OS }}-build-${{ hashFiles('frontend/yarn.lock') }}
           restore-keys: |
             ${{ runner.OS }}-build-
             ${{ runner.OS }}-

       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
-          license_key: ${{ secrets.OSS_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.OSS_LICENSE_KEY }}
           minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -58,7 +65,7 @@
           set -x
           cd frontend
           mv .env.sample .env
-          docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:18-slim /bin/bash -c "yarn && yarn build"
+          docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
           # https://github.com/docker/cli/issues/1134#issuecomment-613516912
           DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
           docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
@@ -101,9 +108,14 @@
       - uses: ./.github/composite-actions/update-keys
         with:
+          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+          assist_key: ${{ secrets.ASSIST_KEY }}
           domain_name: ${{ secrets.EE_DOMAIN_NAME }}
-          license_key: ${{ secrets.EE_LICENSE_KEY }}
+          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
           jwt_secret: ${{ secrets.EE_JWT_SECRET }}
+          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+          license_key: ${{ secrets.EE_LICENSE_KEY }}
           minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
           minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
           pg_password: ${{ secrets.EE_PG_PASSWORD }}
```
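The cache change moves to `actions/cache@v4`, caches both `node_modules` and `.yarn`, and keys the cache on `frontend/yarn.lock` instead of every `package-lock.json` in the repo — so the cache is invalidated exactly when the lockfile that actually drives the install changes. A rough bash analogue of how such a key is built (GitHub's `hashFiles()` is a SHA-256 digest; the exact key string here is illustrative):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Fingerprint the lockfile; a different yarn.lock yields a different key,
# so stale node_modules caches are never restored for a new dependency set.
LOCK_HASH=$(sha256sum frontend/yarn.lock | cut -d' ' -f1)
CACHE_KEY="Linux-build-${LOCK_HASH}"   # runner.OS is "Linux" on ubuntu runners
echo "${CACHE_KEY}"
```

The `restore-keys` prefixes (`Linux-build-`, `Linux-`) still allow a near-miss cache to be restored and updated when no exact match exists.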
.github/workflows/patch-build-old.yaml (vendored, new file): 189 lines

@@ -0,0 +1,189 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma separated names of services to build(in small letters).'
        required: true
        default: 'chalice,frontend'
      tag:
        description: 'Tag to update.'
        required: true
        type: string
      branch:
        description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
        required: true
        type: string

name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag

jobs:
  deploy:
    name: Build Patch from old tag
    runs-on: ubuntu-latest
    env:
      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 4
          ref: ${{ github.event.inputs.tag }}

      - name: Set Remote with GITHUB_TOKEN
        run: |
          git config --unset http.https://github.com/.extraheader
          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

      - name: Create backup tag with timestamp
        run: |
          set -e # Exit immediately if a command exits with a non-zero status
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
          echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
          echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
          git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
          git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
          echo "Created backup tag: $BACKUP_TAG"

          # Get the oldest commit date from the last 3 commits in raw format
          OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
          echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
          # Add 1 second to the timestamp
          NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
          echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV

      - name: Setup yq
        uses: mikefarah/yq@master

      # Configure AWS credentials for the first registry
      - name: Configure AWS credentials for RELEASE_ARM_REGISTRY
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
          aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
          aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}

      - name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
        id: login-ecr-arm
        run: |
          aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
          aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

      - uses: depot/setup-action@v1
      - name: Get HEAD Commit ID
        run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Define Branch Name
        run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV

      - name: Build
        id: build-image
        env:
          DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
          DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
          MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
          MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
          MSAAS_REPO_FOLDER: /tmp/msaas
        run: |
          set -exo pipefail
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git checkout -b $BRANCH_NAME
          working_dir=$(pwd)
          function image_version(){
            local service=$1
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
            current_version=$(yq eval '.AppVersion' $chart_path)
            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
            echo $new_version
            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
          }
          function clone_msaas() {
            [ -d $MSAAS_REPO_FOLDER ] || {
              git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
              cd $MSAAS_REPO_FOLDER
              cd openreplay && git fetch origin && git checkout $INPUT_TAG
              git log -1
              cd $MSAAS_REPO_FOLDER
              bash git-init.sh
              git checkout
            }
          }
          function build_managed() {
            local service=$1
            local version=$2
            echo building managed
            clone_msaas
            if [[ $service == 'chalice' ]]; then
              cd $MSAAS_REPO_FOLDER/openreplay/api
            else
              cd $MSAAS_REPO_FOLDER/openreplay/$service
            fi
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
          }
          # Checking for backend images
          ls backend/cmd >> /tmp/backend.txt
          echo Services: "${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          BUILD_SCRIPT_NAME="build.sh"
          # Build FOSS
          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            version=$(image_version $SERVICE)
            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            else
              build_managed $SERVICE $version
            fi
            cd $working_dir
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
            yq eval ".AppVersion = \"$version\"" -i $chart_path
            git add $chart_path
            git commit -m "Increment $SERVICE chart version"
          done

      - name: Change commit timestamp
        run: |
          # Convert the timestamp to a date format git can understand
          NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
          echo "Setting commit date to: $NEW_DATE"

          # Amend the commit with the new date
          GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"

          # Verify the change
          git log -1 --pretty=format:"Commit now dated: %cD"

          # git tag and push
          git tag $INPUT_TAG -f
          git push origin $INPUT_TAG -f

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true
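Two small tricks carry the workflow above. `image_version` bumps the patch component of a chart's `AppVersion` with awk, and the final step backdates the amended commit to one second after the oldest of the last three commits, so the rewritten tag keeps an older timestamp. A standalone sketch of both, assembled from the lines in the diff; the version string is an example input, and the git commands assume they run inside a clone:

#!/usr/bin/env bash
set -euo pipefail

# Patch-version bump, as in image_version(): split on dots,
# increment the last field, reassemble.
current_version="1.22.0"   # example input
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$new_version"        # prints 1.22.1

# Commit backdating, as in the "Change commit timestamp" step:
# take the oldest of the last 3 commit timestamps, add one second,
# and apply it as both author and committer date of HEAD.
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' "$NEW_TIMESTAMP")
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"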
.github/workflows/patch-build.yaml (vendored): 241 lines changed

@@ -2,7 +2,6 @@
on:
workflow_dispatch:
description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'

@@ -20,12 +19,20 @@ jobs:
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 1
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Rebase with main branch, to make sure the code has latest main changes
if: github.ref != 'refs/heads/main'
run: |
git pull --rebase origin main
git remote -v
git config --global user.email "action@github.com"
git config --global user.name "GitHub Action"
git config --global rebase.autoStash true
git fetch origin main:main
git rebase main
git log -3

- name: Downloading yq
run: |

@@ -48,6 +55,8 @@ jobs:
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

- uses: depot/setup-action@v1
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name

@@ -65,73 +74,168 @@ jobs:
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
SERVICES_INPUT: ${{ github.event.inputs.services }}
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
#!/bin/bash
set -euo pipefail

# Configuration
readonly WORKING_DIR=$(pwd)
readonly BUILD_SCRIPT_NAME="build.sh"
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"

# Initialize git configuration
setup_git() {
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b "$BRANCH_NAME"
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}

# Get and increment image version
image_version() {
local service=$1
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
local current_version new_version

current_version=$(yq eval '.AppVersion' "$chart_path")
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$new_version"
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt

# Clone MSAAS repository if not exists
clone_msaas() {
if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
cd "$MSAAS_REPO_FOLDER"
cd openreplay && git fetch origin && git checkout main
git log -1
cd "$MSAAS_REPO_FOLDER"
bash git-init.sh
git checkout
fi
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' ]] && cd $working_dir/api || cd $SERVICE
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
git push --set-upstream origin $BRANCH_NAME
done

# Build managed services
build_managed() {
local service=$1
local version=$2

echo "Building managed service: $service"
clone_msaas

if [[ $service == 'chalice' ]]; then
cd "$MSAAS_REPO_FOLDER/openreplay/api"
else
cd "$MSAAS_REPO_FOLDER/openreplay/$service"
fi

local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"

echo "Executing: $build_cmd"
if ! eval "$build_cmd" 2>&1; then
echo "Build failed for $service"
exit 1
fi
}

# Build service with given arguments
build_service() {
local service=$1
local version=$2
local build_args=$3
local build_script=${4:-$BUILD_SCRIPT_NAME}

local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
echo "Executing: $command"
eval "$command"
}

# Update chart version and commit changes
update_chart_version() {
local service=$1
local version=$2
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"

# Ensure we're in the original working directory/repository
cd "$WORKING_DIR"
yq eval ".AppVersion = \"$version\"" -i "$chart_path"
git add "$chart_path"
git commit -m "Increment $service chart version to $version"
git push --set-upstream origin "$BRANCH_NAME"
cd -
}

# Main execution
main() {
setup_git

# Get backend services list
ls backend/cmd >"$BACKEND_SERVICES_FILE"

# Parse services input (fix for GitHub Actions syntax)
echo "Services: ${SERVICES_INPUT:-$1}"
IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"

# Process each service
for service in "${services[@]}"; do
echo "Processing service: $service"
cd "$WORKING_DIR"

local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"

# Determine build configuration based on service type
if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
# Backend service
cd backend
foss_build_args="nil $service"
ee_build_args="ee $service"
else
# Non-backend service
case "$service" in
chalice | alerts | crons)
cd "$WORKING_DIR/api"
;;
*)
cd "$service"
;;
esac

# Special build scripts for alerts/crons
if [[ $service == 'alerts' || $service == 'crons' ]]; then
build_script="build_${service}.sh"
fi

ee_build_args="ee"
fi

# Get version and build
local version
version=$(image_version "$service")

# Build FOSS and EE versions
build_service "$service" "$version" "$foss_build_args"
build_service "$service" "${version}-ee" "$ee_build_args"

# Build managed version for specific services
if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
echo "Nothing to build in managed for service $service"
else
build_managed "$service" "$version"
fi

# Update chart and commit
update_chart_version "$service" "$version"
done
cd "$WORKING_DIR"

# Cleanup
rm -f "$BACKEND_SERVICES_FILE"
}

echo "Working directory: $WORKING_DIR"
# Run main function with all arguments
main "$SERVICES_INPUT"

- name: Create Pull Request
uses: repo-sync/pull-request@v2

@@ -142,8 +246,7 @@ jobs:
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
pr_body: |
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
Once this PR is merged, To update the latest tag, run the following workflow.
https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
Once this PR is merged, tag update job will run automatically.

# - name: Debug Job
# if: ${{ failure() }}
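The refactor above routes every build through `build_service`, whose fourth argument falls back to the default build script via bash's `${4:-$BUILD_SCRIPT_NAME}` expansion, and parses the workflow's comma-separated `services` input with `IFS=',' read -ra`. A minimal standalone sketch of both patterns; the service names, version, and echoed command are illustrative stand-ins for the real build.sh invocations:

#!/usr/bin/env bash
set -euo pipefail

readonly BUILD_SCRIPT_NAME="build.sh"

# ${4:-default} substitutes the default only when the 4th argument
# is unset or empty, so callers can omit it for the common case.
build_service() {
  local service=$1 version=$2 build_args=$3
  local build_script=${4:-$BUILD_SCRIPT_NAME}
  echo "would run: bash $build_script $build_args   (service $service, tag $version)"
}

# Split a comma-separated list into an array, as the workflow does
# with its 'services' input. "chalice,frontend" is an example value.
IFS=',' read -ra services <<< "chalice,frontend"
for service in "${services[@]}"; do
  build_service "$service" "1.22.1" "ee"                  # default script
done
build_service "alerts" "1.22.1" "ee" "build_alerts.sh"    # explicit script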
.github/workflows/peers.yaml (vendored, deleted): 144 lines removed

@@ -1,144 +0,0 @@
# This action will push the peers changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
        required: false
        default: "false"
  push:
    branches:
      - dev
      - api-*
    paths:
      - "peers/**"
      - "!peers/.gitignore"
      - "!peers/*-dev.sh"

name: Build and Deploy Peers

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - uses: ./.github/composite-actions/update-keys
        with:
          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
          license_key: ${{ secrets.OSS_LICENSE_KEY }}
          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
        name: Update Keys

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing peers image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd peers
          PUSH_IMAGE=0 bash -x ./build.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("peers")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("peers")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
          image_array=($(echo "$line" | tr ':' '\n'))
          cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              tag: ${image_array[1]}
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          # Update changed image tag
          sed -i "/peers/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mkdir -p /tmp/charts
          mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
          rm -rf openreplay/charts/*
          mv /tmp/charts/* openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: foss
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: "Build failed :bomb:"

      # - name: Debug Job
      #   # if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}-ee
      #     ENVIRONMENT: staging
      #   with:
      #     limit-access-to-actor: true
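The deleted workflow's "Creating old image input" step is worth noting: before templating the chart it pins every already-deployed image tag into a values override, so deploying one service does not reset the tags of the others. A standalone sketch of that jsonpath-to-YAML pivot; the namespace, registry path, and file names mirror the workflow, but the loop uses `IFS=':' read` splitting instead of the array/tr trick in the original:

#!/usr/bin/env bash
set -euo pipefail

# Collect "image:tag" values currently running in the app namespace,
# keeping only images under the /foss/ registry path, as above.
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[:space:]' '\n' | sort -u | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

# Pivot each "name:tag" line into a Helm values override block.
: > /tmp/image_override.yaml
while IFS=':' read -r name tag; do
  cat <<EOF >> /tmp/image_override.yaml
$name:
  image:
    tag: $tag
EOF
done < /tmp/image_tag.txt

cat /tmp/image_override.yaml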
.github/workflows/release-deployment.yaml (vendored, new file): 103 lines

@@ -0,0 +1,103 @@
name: Release Deployment

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
        required: true
      branch:
        description: 'Branch to deploy (defaults to dev)'
        required: false
        default: 'dev'

env:
  IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
  DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
  DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.branch }}
      - name: Docker login
        run: |
          docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - name: Set image tag with branch info
        run: |
          SHORT_SHA=$(git rev-parse --short HEAD)
          echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
          echo "Using image tag: $IMAGE_TAG"

      - uses: depot/setup-action@v1

      - name: Build and push Docker images
        run: |
          # Parse the comma-separated services list into an array
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          working_dir=$(pwd)

          # Define backend services (consider moving this to workflow inputs or repo config)
          ls backend/cmd >> /tmp/backend.txt
          BUILD_SCRIPT_NAME="build.sh"

          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd $working_dir/backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              cd $working_dir
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            {
              echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            }&
            {
              echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
              IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            }&
          done
          wait

      - uses: azure/k8s-set-context@v1
        name: Using ee release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}

      - name: Deploy to ee release Kubernetes
        run: |
          echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
          done

      - uses: azure/k8s-set-context@v1
        name: Using foss release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}

      - name: Deploy to FOSS release Kubernetes
        run: |
          echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
            echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
          done
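The build step above fans out the FOSS and EE builds of each service as background brace groups and reclaims them with `wait`, roughly halving wall-clock time per service. A minimal sketch of the pattern; the echo/sleep pairs stand in for the real build.sh invocations:

#!/usr/bin/env bash
set -euo pipefail

# Each { ...; } & group runs in a background subshell; a bare `wait`
# blocks until every background job exits. Note that bare `wait`
# always returns 0, so a failed background build would not fail the
# script; the workflow above shares that property.
for service in chalice frontend; do
  {
    echo "building $service (foss)"; sleep 1
  } &
  {
    echo "building $service (ee)"; sleep 1
  } &
done
wait
echo "all builds finished"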
.github/workflows/sourcemaps-reader-ee.yaml (vendored): 21 lines changed

@@ -1,4 +1,4 @@
# This action will push the sourcemapreader changes to aws
# This action will push the sourcemapreader changes to ee
on:
workflow_dispatch:
inputs:

@@ -9,13 +9,13 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/sourcemap-reader/**"
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"
- "!sourcemap-reader/*-dev.sh"

name: Build and Deploy sourcemap-reader
name: Build and Deploy sourcemap-reader EE

jobs:
deploy:

@@ -32,9 +32,14 @@ jobs:

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}

@@ -59,7 +64,7 @@ jobs:
- name: Building and Pushing sourcemaps-reader image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |

@@ -67,10 +72,10 @@ jobs:
cd sourcemap-reader
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("sourcemaps-reader")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

@@ -127,7 +132,7 @@ jobs:
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
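Both sourcemap-reader workflows gate the trivy scan behind the same guard, `[[ "x$skip_security_checks" == "xtrue" ]] || { ... }`. The `x` prefix is a legacy idiom that keeps the comparison well-formed even when the variable is empty; reduced to a standalone script the guard looks like the sketch below, with the variable value taken from the first argument for illustration:

#!/usr/bin/env bash
set -euo pipefail

# Empty unless the workflow input sets it; the x-prefix means the
# test still compares two non-empty words when the value is "".
skip_security_checks="${1:-}"

[[ "x$skip_security_checks" == "xtrue" ]] || {
  echo "running security scan..."
  # the trivy invocation from the workflows above would run here
}

Inside bash's `[[ ... ]]` the empty-operand pitfall the prefix guards against does not actually arise, so a plain quoted comparison against "true" would behave identically; the prefix survives from older POSIX `[` usage.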
.github/workflows/sourcemaps-reader.yaml (vendored): 12 lines changed

@@ -9,7 +9,6 @@ on:
push:
branches:
- dev
- api-*
paths:
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"

@@ -32,9 +31,14 @@ jobs:

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}

@@ -67,10 +71,10 @@ jobs:
cd sourcemap-reader
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("sourcemaps-reader")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
.github/workflows/tracker-tests.yaml (vendored): 10 lines changed

@@ -9,24 +9,16 @@ on:
pull_request:
branches: [ "dev", "main" ]
paths:
- frontend/**
- tracker/**
jobs:
build-and-test:
runs-on: macos-latest
name: Build and test Tracker
strategy:
matrix:
node-version: [ 18.x ]
steps:
- uses: oven-sh/setup-bun@v1
- uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
- name: Cache tracker modules
uses: actions/cache@v3
with:
.github/workflows/ui-tests.js.yml (vendored): 10 lines changed

@@ -27,9 +27,9 @@ jobs:
name: Build and test Tracker plus Replayer
strategy:
matrix:
node-version: [ 18.x ]
node-version: [ 20.x ]
steps:
- uses: oven-sh/setup-bun@v1
- uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- uses: actions/checkout@v3

@@ -102,11 +102,11 @@ jobs:
- name: Setup packages
run: |
cd frontend
npm install --legacy-peer-deps
yarn
- name: Run unit tests
run: |
cd frontend
npm run test:ci
yarn test:ci
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
with:

@@ -138,7 +138,7 @@ jobs:
- name: (Chrome) Run visual tests
run: |
cd frontend
npm run cy:test
yarn cy:test
# firefox have different viewport somehow
# - name: (Firefox) Run visual tests
# run: yarn cy:test-firefox
.github/workflows/update-tag.yaml (vendored): 47 lines changed

@@ -1,35 +1,42 @@
on:
workflow_dispatch:
description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
inputs:
services:
description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
required: true
default: "false"

name: Force Push tag with main branch HEAD
pull_request:
types: [closed]
branches:
- main
name: Release tag update --force

jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Get latest release tag using GitHub API
id: get-latest-tag
run: |
LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
| jq -r .tag_name)

# Fallback to git command if API doesn't return a tag
if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
echo "Not found latest tag"
exit 100
fi

echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
echo "Latest tag: $LATEST_TAG"

- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}

- name: Push main branch to tag
run: |
git fetch --tags
git checkout main
git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true
echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
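The rewritten workflow resolves the tag to move via the GitHub releases API instead of sorting local `v*` tags. A standalone sketch of that lookup and force-push, built from the lines in the diff; OWNER/REPO is a placeholder and GITHUB_TOKEN is assumed to be set in the environment:

#!/usr/bin/env bash
set -euo pipefail

repo="OWNER/REPO"   # placeholder for the real repository slug

# Ask the releases API for the newest release's tag name.
latest_tag=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" \
  "https://api.github.com/repos/${repo}/releases/latest" | jq -r .tag_name)

# Bail out rather than force-pushing to a bogus ref.
if [ "$latest_tag" == "null" ] || [ -z "$latest_tag" ]; then
  echo "Not found latest tag" >&2
  exit 100
fi

# Point the release tag at the current main HEAD, as the workflow does.
git fetch --tags
git checkout main
git push origin "HEAD:refs/tags/${latest_tag}" --force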
.github/workflows/workers-ee.yaml (vendored): 11 lines changed

@@ -36,9 +36,14 @@ jobs:

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}

@@ -116,8 +121,8 @@ jobs:
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh ee $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
.github/workflows/workers.yaml (vendored): 12 lines changed

@@ -35,9 +35,14 @@ jobs:

- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}

@@ -109,8 +114,8 @@ jobs:
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh skip $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code

@@ -178,3 +183,4 @@ jobs:
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true
#
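The worker and sourcemap-reader workflows all share the same scan-and-gate shape: fetch a pinned trivy release, scan the freshly built image, and let `--exit-code 1` fail the job when unfixed HIGH/CRITICAL findings turn up. A condensed sketch of that gate; the image reference is illustrative, while the trivy version and flags are exactly the ones the diffs pin, with the second `--db-repository` evidently serving as a fallback database mirror:

#!/usr/bin/env bash
set -euo pipefail

# Illustrative image; the workflows scan $DOCKER_REPO/$image:$IMAGE_TAG.
image_ref="${1:-example.registry.io/peers:dev_abc123}"

# Fetch the pinned trivy release, as the updated workflows do.
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz \
  | tar -xzf - -C ./

# --exit-code 1 makes trivy (and, under set -e, this script) fail when
# unfixed HIGH/CRITICAL vulnerabilities are found.
./trivy image \
  --db-repository ghcr.io/aquasecurity/trivy-db:2 \
  --db-repository public.ecr.aws/aquasecurity/trivy-db:2 \
  --exit-code 1 --vuln-type os,library \
  --severity "HIGH,CRITICAL" --ignore-unfixed \
  "$image_ref"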
LICENSE: 702 lines changed

@@ -1,64 +1,694 @@
Copyright (c) 2022 Asayer, Inc.
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay

OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
- Some directories have a specific LICENSE file and are licensed under the "MIT" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "Elastic License 2.0 (ELv2)" license, as defined below.
- All third party components incorporated into the OpenReplay Software are licensed under the original license provided by the owner of the applicable component.
- Some directories are licensed under the "MIT" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "GNU Affero General Public License Version 3 (AGPL v3)" license, as defined below.

Reach out (license@openreplay.com) if you have any questions regarding licenses.

------------------------------------------------------------------------------------
MIT License
--------------------------------------------------------------------------------
MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

------------------------------------------------------------------------------------
Elastic License 2.0 (ELv2)
--------------------------------------------------------------------------------
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

**Acceptance**
By using the software, you agree to all of the terms and conditions below.
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

**Copyright License**
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below
Preamble

**Limitations**
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

**Patents**
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

**Notices**
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

**No Other Rights**
These terms do not imply any licenses other than those expressly granted in these terms.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

**Termination**
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
The precise terms and conditions for copying, distribution and
modification follow.

**No Liability**
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
TERMS AND CONDITIONS

**Definitions**
The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it.
0. Definitions.

*you* refers to the individual or entity agreeing to these terms.
"This License" refers to version 3 of the GNU Affero General Public License.

*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

*your licenses* are all the licenses granted to you for the software under these terms.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

*use* means anything you do with the software requiring one of your licenses.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

*trademark* means trademarks, service marks, and similar rights.
A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
@@ -44,7 +44,7 @@
</a>
</p>

OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
OpenReplay is an open-source session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.

- **Session replay.** OpenReplay replays what users do, and more: it also shows you what happens under the hood and how your website or app behaves, by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, CPU/memory usage and much more.
- **Low footprint.** A ~26KB (.br) tracker that asynchronously sends minimal data, for a very limited impact on performance.
@@ -55,9 +55,9 @@ OpenReplay is a session replay suite you can host yourself, that lets you see wh
## Features

- **Session replay:** Lets you relive your users' experience, see where they struggle and how it affects their behavior. Each session replay is automatically analyzed based on heuristics, for easy triage.
- **Spot:** A Chrome extension that lets you record bugs directly from your browser; each recording includes all the technical details developers need to fix them.
- **DevTools:** It's like debugging in your own browser. OpenReplay provides you with the full context (network activity, JS errors, store actions/state and 40+ metrics) so you can instantly reproduce bugs and understand performance issues.
- **Assist:** Helps you support your users by seeing their live screen and instantly hopping on a call (WebRTC) with them, without requiring any 3rd-party screen-sharing software.
- **Feature flags:** Enable or disable a feature, make gradual releases and A/B test, all without redeploying your app.
- **Omni-search:** Search and filter by almost any user action/criteria, session attribute or technical event, so you can answer any question. No instrumentation required.
- **Analytics:** Surfaces the most impactful issues causing conversion and revenue loss.
- **Fine-grained privacy controls:** Choose what to capture, what to obscure and what to ignore, so user data doesn't even reach your servers.
@@ -98,10 +98,6 @@ See our [Contributing Guide](CONTRIBUTING.md) for more details.

Also, feel free to join our [Slack](https://slack.openreplay.com) to ask questions, discuss ideas or connect with our contributors.

## Roadmap

Check out our [roadmap](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) and keep an eye on what's coming next. You're free to [submit](https://github.com/openreplay/openreplay/issues/new) new ideas and vote on features.

## License

This monorepo uses several licenses. See [LICENSE](/LICENSE) for more details.
api/.gitignore (vendored)
@@ -175,4 +175,5 @@ SUBNETS.json
./chalicelib/.configs
README/*
.local
/.dev/
@@ -1,30 +1,31 @@
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
FROM python:3.12-alpine AS builder
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"

RUN apk add --no-cache build-base tini
RUN apk add --no-cache build-base
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv && \
    export UV_SYSTEM_PYTHON=true && \
    uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
    uv pip install --no-cache-dir --upgrade -r requirements.txt

FROM python:3.12-alpine
ARG GIT_SHA
ARG envarg
# Add Tini
# Startup daemon
ENV SOURCE_MAP_VERSION=0.7.4 \
    APP_NAME=chalice \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=false \
    ENTERPRISE_BUILD=${envarg} \
    GIT_SHA=$GIT_SHA

    APP_NAME=chalice \
    LISTEN_PORT=8000 \
    PRIVATE_ENDPOINTS=false \
    ENTERPRISE_BUILD=${envarg} \
    GIT_SHA=$GIT_SHA
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY . .
RUN mv env.default .env

RUN adduser -u 1001 openreplay -D
USER 1001
RUN apk add --no-cache tini && mv env.default .env

ENTRYPOINT ["/sbin/tini", "--"]
CMD ./entrypoint.sh

CMD ["./entrypoint.sh"]
@@ -1,4 +1,4 @@
FROM python:3.11-alpine
FROM python:3.12-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA

@@ -16,7 +16,9 @@ ENV APP_NAME=alerts \

WORKDIR /work
COPY requirements-alerts.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt
RUN pip install --no-cache-dir --upgrade uv
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system

COPY . .
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
api/Pipfile
@@ -4,23 +4,26 @@ verify_ssl = true
name = "pypi"

[packages]
urllib3 = "==1.26.16"
urllib3 = "==2.3.0"
requests = "==2.32.3"
boto3 = "==1.34.158"
pyjwt = "==2.9.0"
psycopg2-binary = "==2.9.9"
elasticsearch = "==8.14.0"
boto3 = "==1.36.12"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.15"
elasticsearch = "==8.17.1"
jira = "==3.8.0"
fastapi = "==0.112.0"
cachetools = "==5.5.1"
fastapi = "==0.115.8"
uvicorn = {extras = ["standard"], version = "==0.34.0"}
python-decouple = "==3.8"
apscheduler = "==3.10.4"
redis = "==5.1.0b6"
cachetools = "==5.4.0"
psycopg = {extras = ["pool", "binary"], version = "==3.2.1"}
uvicorn = {extras = ["standard"], version = "==0.30.5"}
pydantic = {extras = ["email"], version = "==2.8.2"}
pydantic = {extras = ["email"], version = "==2.10.6"}
apscheduler = "==3.11.0"
redis = "==5.2.1"

[dev-packages]

[requires]
python_version = "3.12"
python_full_version = "3.12.8"
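The pins above move the API to psycopg 3 (with the pool extra) alongside psycopg2-binary, and add clickhouse-connect next to clickhouse-driver. A hedged sketch of what those two new packages provide; the connection strings below are placeholders, not the repo's actual configuration:

```python
import asyncio

import clickhouse_connect                      # clickhouse-connect (new pin)
from psycopg_pool import AsyncConnectionPool   # psycopg[pool] (new pin)


async def main() -> None:
    # psycopg 3 async pool: connections are opened lazily and reused.
    async with AsyncConnectionPool("postgresql://localhost/openreplay_dev") as pool:
        async with pool.connection() as conn:
            cur = await conn.execute("SELECT 1")
            print(await cur.fetchall())

    # clickhouse-connect talks to ClickHouse over its HTTP interface.
    ch = clickhouse_connect.get_client(host="localhost")
    print(ch.query("SELECT version()").result_rows)


asyncio.run(main())
```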
api/app.py
@@ -13,17 +13,16 @@ from psycopg.rows import dict_row
from starlette.responses import StreamingResponse

from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils import pg_client, ch_client
from crons import core_crons, core_dynamic_crons
from routers import core, core_dynamic
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics

loglevel = config("LOGLEVEL", default=logging.WARNING)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)


class ORPYAsyncConnection(AsyncConnection):

    def __init__(self, *args, **kwargs):

@@ -39,6 +38,7 @@ async def lifespan(app: FastAPI):

    app.schedule = AsyncIOScheduler()
    await pg_client.init()
    await ch_client.init()
    app.schedule.start()

    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:

@@ -128,3 +128,7 @@ app.include_router(usability_tests.app_apikey)
app.include_router(spot.public_app)
app.include_router(spot.app)
app.include_router(spot.app_apikey)

app.include_router(product_anaytics.public_app)
app.include_router(product_anaytics.app)
app.include_router(product_anaytics.app_apikey)
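The second hunk above adds ClickHouse initialization (`ch_client.init()`) alongside the existing Postgres setup inside the FastAPI lifespan. A minimal, self-contained sketch of that startup/shutdown pattern; the `_Client` stub is a hypothetical stand-in for the repo's `pg_client`/`ch_client` modules:

```python
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI


class _Client:
    """Hypothetical stand-in for the repo's pg_client / ch_client helpers."""
    async def init(self) -> None: ...        # open the pool / client
    async def terminate(self) -> None: ...   # close it


pg_client, ch_client = _Client(), _Client()


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: open shared DB connections once, then start scheduled jobs.
    app.schedule = AsyncIOScheduler()
    await pg_client.init()
    await ch_client.init()  # the ClickHouse init this change adds
    app.schedule.start()
    yield
    # Shutdown: stop jobs first, then release connections.
    app.schedule.shutdown(wait=False)
    await pg_client.terminate()


app = FastAPI(lifespan=lifespan)
```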
@@ -5,14 +5,14 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI

from chalicelib.core import alerts_processor
from chalicelib.core.alerts import alerts_processor
from chalicelib.utils import pg_client


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup
    logging.info(">>>>> starting up <<<<<")
    ap_logger.info(">>>>> starting up <<<<<")
    await pg_client.init()
    app.schedule.start()
    app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",

@@ -27,14 +27,22 @@ async def lifespan(app: FastAPI):
    yield

    # Shutdown
    logging.info(">>>>> shutting down <<<<<")
    ap_logger.info(">>>>> shutting down <<<<<")
    app.schedule.shutdown(wait=False)
    await pg_client.terminate()


loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)

app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
              redoc_url=config("redoc_url", default=""), lifespan=lifespan)
logging.info("============= ALERTS =============")

app.schedule = AsyncIOScheduler()
ap_logger.info("============= ALERTS =============")


@app.get("/")

@@ -50,17 +58,8 @@ async def get_health_status():
    }}


app.schedule = AsyncIOScheduler()

loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()

if config("LOCAL_DEV", default=False, cast=bool):
    @app.get('/trigger', tags=["private"])
    async def trigger_main_cron():
        logging.info("Triggering main cron")
        ap_logger.info("Triggering main cron")
        alerts_processor.process()
@@ -35,7 +35,7 @@ class JWTAuth(HTTPBearer):
        if request.url.path in ["/refresh", "/api/refresh"]:
            return await self.__process_refresh_call(request)

        elif request.url.path in ["/spot/refresh", "/spot/api/refresh"]:
        elif request.url.path in ["/spot/refresh", "/api/spot/refresh"]:
            return await self.__process_spot_refresh_call(request)

        else:
@@ -31,8 +31,8 @@ class ProjectAuthorizer:
            logger.debug(f"unauthorized project {self.project_identifier}:{value}")
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
        else:
            current_project = schemas.CurrentProjectContext(projectId=current_project["projectId"],
                                                            projectKey=current_project["projectKey"],
                                                            platform=current_project["platform"],
                                                            name=current_project["name"])
            current_project = schemas.ProjectContext(projectId=current_project["projectId"],
                                                     projectKey=current_project["projectKey"],
                                                     platform=current_project["platform"],
                                                     name=current_project["name"])
            request.state.currentContext.project = current_project
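The hunk above is a mechanical rename of the schema class (CurrentProjectContext becomes ProjectContext). Judging only by the call site, the model is a small pydantic schema along these lines; this is an inferred sketch, not the repo's actual definition:

```python
from pydantic import BaseModel


class ProjectContext(BaseModel):
    projectId: int
    projectKey: str
    platform: str
    name: str


# Built from a DB row and attached to the request context:
ctx = ProjectContext(projectId=1, projectKey="abc123", platform="web", name="my-project")
print(ctx.model_dump())
```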
api/chalicelib/core/alerts/__init__.py (new file)
@@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_ALERTS", cast=bool, default=False):
    logging.info(">>> Using experimental alerts")
    from . import alerts_processor_ch as alerts_processor
else:
    from . import alerts_processor as alerts_processor
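Because the backend is resolved once at import time, callers never branch on EXP_ALERTS themselves; both modules expose the same process() entry point, as the later diffs show. Caller side:

```python
# Anywhere else in the codebase, callers stay backend-agnostic:
from chalicelib.core.alerts import alerts_processor

alerts_processor.process()  # PG-backed by default, ClickHouse-backed when EXP_ALERTS=true
```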
@@ -7,11 +7,13 @@ from decouple import config

import schemas
from chalicelib.core import notifications, webhook
from chalicelib.core.collaboration_msteams import MSTeams
from chalicelib.core.collaboration_slack import Slack
from chalicelib.core.collaborations.collaboration_msteams import MSTeams
from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.utils import pg_client, helper, email_helper, smtp
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)


def get(id):
    with pg_client.PostgresClient() as cur:
@@ -1,9 +1,10 @@
from chalicelib.core.alerts.modules import TENANT_ID
from chalicelib.utils import pg_client, helper


def get_all_alerts():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = """SELECT tenant_id,
        query = f"""SELECT {TENANT_ID} AS tenant_id,
                          alert_id,
                          projects.project_id,
                          projects.name AS project_name,
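Note the pattern in the new query: TENANT_ID is interpolated with an f-string while runtime values stay as bound parameters. That is safe only because TENANT_ID is a code-level constant (set to "-1" in the new modules/__init__.py further down), never user input. A reduced illustration, with a hypothetical build_alerts_query helper:

```python
TENANT_ID = "-1"  # code-level constant, mirrors alerts/modules/__init__.py


def build_alerts_query(project_id: int) -> tuple[str, dict]:
    # The constant goes into the SQL text; user/runtime values stay bound.
    query = f"""SELECT {TENANT_ID} AS tenant_id, alert_id
                FROM public.alerts
                WHERE project_id = %(project_id)s;"""
    return query, {"project_id": project_id}
```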
@@ -1,17 +1,15 @@
import decimal
import logging

from decouple import config
from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.core import alerts
from chalicelib.core import alerts_listener
from chalicelib.core import sessions
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_pg as sessions
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logger = logging.getLogger(__name__)

LeftToDb = {
    schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {

@@ -36,30 +34,6 @@ LeftToDb = {
    schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
        "table": "events.pages INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(visually_complete,0))"},
    schemas.AlertColumn.PERFORMANCE__IMAGE_LOAD_TIME__AVERAGE: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"},
    schemas.AlertColumn.PERFORMANCE__REQUEST_LOAD_TIME__AVERAGE: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"},
    schemas.AlertColumn.RESOURCES__LOAD_TIME__AVERAGE: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "AVG(NULLIF(resources.duration,0))"},
    schemas.AlertColumn.RESOURCES__MISSING__COUNT: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE AND type='img'"},
    schemas.AlertColumn.ERRORS__4XX_5XX__COUNT: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
        "condition": "status/100!=2"},
    schemas.AlertColumn.ERRORS__4XX__COUNT: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(session_id)", "condition": "status/100=4"},
    schemas.AlertColumn.ERRORS__5XX__COUNT: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(session_id)", "condition": "status/100=5"},
    schemas.AlertColumn.ERRORS__JAVASCRIPT__IMPACTED_SESSIONS__COUNT: {
        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
        "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
    schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
        "table": "public.sessions",
        "formula": "COUNT(DISTINCT session_id)",

@@ -72,35 +46,6 @@ LeftToDb = {
        "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
}

# This is the frequency of execution for each threshold
TimeInterval = {
    15: 3,
    30: 5,
    60: 10,
    120: 20,
    240: 30,
    1440: 60,
}


def can_check(a) -> bool:
    now = TimeUTC.now()

    repetitionBase = a["options"]["currentPeriod"] \
        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
        else a["options"]["previousPeriod"]

    if TimeInterval.get(repetitionBase) is None:
        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
        return False

    return (a["options"]["renotifyInterval"] <= 0 or
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


def Build(a):
    now = TimeUTC.now()

@@ -187,11 +132,12 @@ def Build(a):


def process():
    logger.info("> processing alerts on PG")
    notifications = []
    all_alerts = alerts_listener.get_all_alerts()
    with pg_client.PostgresClient() as cur:
        for alert in all_alerts:
            if can_check(alert):
            if alert_helpers.can_check(alert):
                query, params = Build(alert)
                try:
                    query = cur.mogrify(query, params)

@@ -207,7 +153,7 @@ def process():
                    result = cur.fetchone()
                    if result["valid"]:
                        logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
                        notifications.append(generate_notification(alert, result))
                        notifications.append(alert_helpers.generate_notification(alert, result))
                except Exception as e:
                    logger.error(
                        f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")

@@ -221,42 +167,3 @@ def process():
                           WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
    if len(notifications) > 0:
        alerts.process_notifications(notifications)


def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"


def generate_notification(alert, result):
    left = __format_value(result['value'])
    right = __format_value(alert['query']['right'])
    return {
        "alertId": alert["alertId"],
        "tenantId": alert["tenantId"],
        "title": alert["name"],
        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
        "buttonText": "Check metrics for more details",
        "buttonUrl": f"/{alert['projectId']}/metrics",
        "imageUrl": None,
        "projectId": alert["projectId"],
        "projectName": alert["projectName"],
        "options": {"source": "ALERT", "sourceId": alert["alertId"],
                    "sourceMeta": alert["detectionMethod"],
                    "message": alert["options"]["message"], "projectId": alert["projectId"],
                    "data": {"title": alert["name"],
                             "limitValue": alert["query"]["right"],
                             "actualValue": float(result["value"]) \
                                 if isinstance(result["value"], decimal.Decimal) \
                                 else result["value"],
                             "operator": alert["query"]["operator"],
                             "trigger": alert["query"]["left"],
                             "alertId": alert["alertId"],
                             "detectionMethod": alert["detectionMethod"],
                             "currentPeriod": alert["options"]["currentPeriod"],
                             "previousPeriod": alert["options"]["previousPeriod"],
                             "createdAt": TimeUTC.now()}},
    }
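The refactored process() keeps a per-alert try/except so a single malformed alert is logged and skipped instead of aborting the whole batch, with notifications flushed once at the end. A reduced sketch of that error-isolation shape; build_query and make_notification are hypothetical stand-ins for Build and alert_helpers.generate_notification:

```python
import logging

logger = logging.getLogger(__name__)


def process_batch(all_alerts, cur, build_query, make_notification):
    """Evaluate every alert; isolate per-alert failures; return notifications."""
    notifications = []
    for alert in all_alerts:
        try:
            query = build_query(alert)   # may raise on a bad alert definition
            cur.execute(query)
            result = cur.fetchone()
        except Exception:
            # One bad alert must not abort the rest of the batch.
            logger.exception("alert %s failed", alert.get("alertId"))
            continue
        if result and result.get("valid"):
            notifications.append(make_notification(alert, result))
    return notifications
```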
@ -1,16 +1,15 @@
|
|||
import logging
|
||||
|
||||
from decouple import config
|
||||
from pydantic_core._pydantic_core import ValidationError
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import alerts
|
||||
from chalicelib.core import alerts_listener, alerts_processor
|
||||
from chalicelib.core import sessions_exp as sessions
|
||||
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import alert_helpers
|
||||
from chalicelib.core.sessions import sessions_ch as sessions
|
||||
|
||||
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
LeftToDb = {
|
||||
schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
|
||||
|
|
@ -53,49 +52,6 @@ LeftToDb = {
|
|||
"formula": "AVG(NULLIF(visually_complete,0))",
|
||||
"eventType": "LOCATION"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__IMAGE_LOAD_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))",
|
||||
"condition": "type='img'"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__REQUEST_LOAD_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))",
|
||||
"condition": "type='fetch'"
|
||||
},
|
||||
schemas.AlertColumn.RESOURCES__LOAD_TIME__AVERAGE: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
|
||||
"formula": "AVG(NULLIF(resources.duration,0))"
|
||||
},
|
||||
schemas.AlertColumn.RESOURCES__MISSING__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_resources_table(timestamp)} AS resources",
|
||||
"formula": "COUNT(DISTINCT url_hostpath)",
|
||||
"condition": "success= FALSE AND type='img'"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__4XX_5XX__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
|
||||
"eventType": "REQUEST",
|
||||
"formula": "COUNT(1)",
|
||||
"condition": "intDiv(requests.status, 100)!=2"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__4XX__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
|
||||
"eventType": "REQUEST",
|
||||
"formula": "COUNT(1)",
|
||||
"condition": "intDiv(requests.status, 100)==4"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__5XX__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS requests",
|
||||
"eventType": "REQUEST",
|
||||
"formula": "COUNT(1)",
|
||||
"condition": "intDiv(requests.status, 100)==5"
|
||||
},
|
||||
schemas.AlertColumn.ERRORS__JAVASCRIPT__IMPACTED_SESSIONS__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_events_table(timestamp)} AS errors",
|
||||
"eventType": "ERROR",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
"condition": "source='js_exception'"
|
||||
},
|
||||
schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
|
||||
"table": lambda timestamp: f"{exp_ch_helper.get_main_sessions_table(timestamp)} AS sessions",
|
||||
"formula": "COUNT(DISTINCT session_id)",
|
||||
|
|
@ -128,8 +84,8 @@ def Build(a):
|
|||
try:
|
||||
data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
|
||||
except ValidationError:
|
||||
logging.warning("Validation error for:")
|
||||
logging.warning(a["filter"])
|
||||
logger.warning("Validation error for:")
|
||||
logger.warning(a["filter"])
|
||||
raise
|
||||
|
||||
full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
|
||||
|
|
@@ -200,35 +156,36 @@ def Build(a):


def process():
    logger.info("> processing alerts on CH")
    notifications = []
    all_alerts = alerts_listener.get_all_alerts()
    with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
        for alert in all_alerts:
            if alert["query"]["left"] != "CUSTOM":
                continue
-            if alerts_processor.can_check(alert):
+            if alert_helpers.can_check(alert):
                query, params = Build(alert)
                try:
-                    query = ch_cur.format(query, params)
+                    query = ch_cur.format(query=query, parameters=params)
                except Exception as e:
-                    logging.error(
+                    logger.error(
                        f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
-                    logging.error(e)
+                    logger.error(e)
                    continue
-                logging.debug(alert)
-                logging.debug(query)
+                logger.debug(alert)
+                logger.debug(query)
                try:
-                    result = ch_cur.execute(query)
+                    result = ch_cur.execute(query=query)
                    if len(result) > 0:
                        result = result[0]

                    if result["valid"]:
-                        logging.info("Valid alert, notifying users")
-                        notifications.append(alerts_processor.generate_notification(alert, result))
+                        logger.info("Valid alert, notifying users")
+                        notifications.append(alert_helpers.generate_notification(alert, result))
                except Exception as e:
-                    logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
-                    logging.error(str(e))
-                    logging.error(query)
+                    logger.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
+                    logger.error(str(e))
+                    logger.error(query)
        if len(notifications) > 0:
            cur.execute(
                cur.mogrify(f"""UPDATE public.alerts
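Note: most of the changes in this hunk replace calls on the root `logging` module with a module-level logger, and make the ClickHouse client calls keyword-only. A minimal sketch of the logger pattern being adopted, assuming only the standard library:

    import logging

    # One logger per module; the module name appears on every record, and
    # levels/handlers can then be tuned per module from one logging config.
    logger = logging.getLogger(__name__)

    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)s %(name)s %(levelname)s %(message)s")
        logger.info("> processing alerts on CH")
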
api/chalicelib/core/alerts/modules/__init__.py (new file, +3)
@@ -0,0 +1,3 @@
+TENANT_ID = "-1"
+
+from . import helpers as alert_helpers

api/chalicelib/core/alerts/modules/helpers.py (new file, +74)
@@ -0,0 +1,74 @@
+import decimal
+import logging
+
+import schemas
+from chalicelib.utils.TimeUTC import TimeUTC
+
+logger = logging.getLogger(__name__)
+# This is the frequency of execution for each threshold
+TimeInterval = {
+    15: 3,
+    30: 5,
+    60: 10,
+    120: 20,
+    240: 30,
+    1440: 60,
+}
+
+
+def __format_value(x):
+    if x % 1 == 0:
+        x = int(x)
+    else:
+        x = round(x, 2)
+    return f"{x:,}"
+
+
+def can_check(a) -> bool:
+    now = TimeUTC.now()
+
+    repetitionBase = a["options"]["currentPeriod"] \
+        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
+           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
+        else a["options"]["previousPeriod"]
+
+    if TimeInterval.get(repetitionBase) is None:
+        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
+        return False
+
+    return (a["options"]["renotifyInterval"] <= 0 or
+            a["options"].get("lastNotification") is None or
+            a["options"]["lastNotification"] <= 0 or
+            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
+        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
+
+
+def generate_notification(alert, result):
+    left = __format_value(result['value'])
+    right = __format_value(alert['query']['right'])
+    return {
+        "alertId": alert["alertId"],
+        "tenantId": alert["tenantId"],
+        "title": alert["name"],
+        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
+        "buttonText": "Check metrics for more details",
+        "buttonUrl": f"/{alert['projectId']}/metrics",
+        "imageUrl": None,
+        "projectId": alert["projectId"],
+        "projectName": alert["projectName"],
+        "options": {"source": "ALERT", "sourceId": alert["alertId"],
+                    "sourceMeta": alert["detectionMethod"],
+                    "message": alert["options"]["message"], "projectId": alert["projectId"],
+                    "data": {"title": alert["name"],
+                             "limitValue": alert["query"]["right"],
+                             "actualValue": float(result["value"]) \
+                                 if isinstance(result["value"], decimal.Decimal) \
+                                 else result["value"],
+                             "operator": alert["query"]["operator"],
+                             "trigger": alert["query"]["left"],
+                             "alertId": alert["alertId"],
+                             "detectionMethod": alert["detectionMethod"],
+                             "currentPeriod": alert["options"]["currentPeriod"],
+                             "previousPeriod": alert["options"]["previousPeriod"],
+                             "createdAt": TimeUTC.now()}},
+    }

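Note: can_check() gates each alert on two clocks: a re-notify cooldown and a fixed check cadence derived from TimeInterval (threshold period in minutes -> check every N minutes). A small self-contained sketch of the cadence arithmetic, using plain millisecond integers instead of TimeUTC purely for illustration:

    # Illustrative only: reproduces the modulo cadence from can_check() above.
    TimeInterval = {15: 3, 30: 5, 60: 10, 120: 20, 240: 30, 1440: 60}

    def due_now(now_ms: int, created_at_ms: int, period_minutes: int) -> bool:
        interval_ms = TimeInterval[period_minutes] * 60 * 1000
        # True during the one-minute window that opens every `interval_ms`
        # after the alert was created.
        return (now_ms - created_at_ms) % interval_ms < 60 * 1000

    created = 0
    assert due_now(5 * 60 * 1000, created, 30)      # 5 min after creation: checked
    assert not due_now(6 * 60 * 1000, created, 30)  # 6 min after: outside the window
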
@@ -1,32 +0,0 @@
-from chalicelib.utils import pg_client, helper
-
-
-def get_all_alerts():
-    with pg_client.PostgresClient(long_query=True) as cur:
-        query = """SELECT -1 AS tenant_id,
-                          alert_id,
-                          projects.project_id,
-                          projects.name AS project_name,
-                          detection_method,
-                          query,
-                          options,
-                          (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
-                          alerts.name,
-                          alerts.series_id,
-                          filter,
-                          change,
-                          COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
-                                   query ->> 'left') AS series_name
-                   FROM public.alerts
-                            INNER JOIN projects USING (project_id)
-                            LEFT JOIN metric_series USING (series_id)
-                            LEFT JOIN metrics USING (metric_id)
-                   WHERE alerts.deleted_at ISNULL
-                     AND alerts.active
-                     AND projects.active
-                     AND projects.deleted_at ISNULL
-                     AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
-                   ORDER BY alerts.created_at;"""
-        cur.execute(query=query)
-        all_alerts = helper.list_to_camel_case(cur.fetchall())
-        return all_alerts

@@ -1,3 +1,4 @@
+import logging
from os import access, R_OK
from os.path import exists as path_exists, getsize

@@ -10,22 +11,10 @@ import schemas
from chalicelib.core import projects
from chalicelib.utils.TimeUTC import TimeUTC

+logger = logging.getLogger(__name__)
+
ASSIST_KEY = config("ASSIST_KEY")
ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY
-SESSION_PROJECTION_COLS = """s.project_id,
-                             s.session_id::text AS session_id,
-                             s.user_uuid,
-                             s.user_id,
-                             s.user_agent,
-                             s.user_os,
-                             s.user_browser,
-                             s.user_device,
-                             s.user_device_type,
-                             s.user_country,
-                             s.start_ts,
-                             s.user_anonymous_id,
-                             s.platform
-                             """


def get_live_sessions_ws_user_id(project_id, user_id):
@@ -66,21 +55,21 @@ def __get_live_sessions_ws(project_id, data):
        results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
                                json=data, timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
-            print(results.text)
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
+            logger.error(results.text)
            return {"total": 0, "sessions": []}
        live_peers = results.json().get("data", [])
    except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
        live_peers = {"total": 0, "sessions": []}
    except Exception as e:
-        print("!! Issue getting Live-Assist response")
-        print(str(e))
-        print("expected JSON, received:")
+        logger.error("!! Issue getting Live-Assist response")
+        logger.exception(e)
+        logger.error("expected JSON, received:")
        try:
-            print(results.text)
+            logger.error(results.text)
        except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
        live_peers = {"total": 0, "sessions": []}
    _live_peers = live_peers
    if "sessions" in live_peers:
@@ -106,7 +95,7 @@ def __get_agent_token(project_id, project_key, session_id):
            "aud": f"openreplay:agent"
        },
        key=config("ASSIST_JWT_SECRET"),
-        algorithm=config("jwt_algorithm")
+        algorithm=config("JWT_ALGORITHM")
    )

@@ -116,8 +105,8 @@ def get_live_session_by_id(project_id, session_id):
        results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
-            print(results.text)
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
+            logger.error(results.text)
            return None
        results = results.json().get("data")
        if results is None:
@@ -125,16 +114,16 @@ def get_live_session_by_id(project_id, session_id):
        results["live"] = True
        results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
    except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
        return None
    except Exception as e:
-        print("!! Issue getting Assist response")
-        print(str(e))
-        print("expected JSON, received:")
+        logger.error("!! Issue getting Assist response")
+        logger.exception(e)
+        logger.error("expected JSON, received:")
        try:
-            print(results.text)
+            logger.error(results.text)
        except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
        return None
    return results

@@ -146,21 +135,21 @@ def is_live(project_id, session_id, project_key=None):
        results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for is_live")
-            print(results.text)
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
+            logger.error(results.text)
            return False
        results = results.json().get("data")
    except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
        return False
    except Exception as e:
-        print("!! Issue getting Assist response")
-        print(str(e))
-        print("expected JSON, received:")
+        logger.error("!! Issue getting Assist response")
+        logger.exception(e)
+        logger.error("expected JSON, received:")
        try:
-            print(results.text)
+            logger.error(results.text)
        except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
        return False
    return str(session_id) == results

@@ -175,32 +164,27 @@ def autocomplete(project_id, q: str, key: str = None):
            ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
            params=params, timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
-            print(results.text)
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
+            logger.error(results.text)
            return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
        results = results.json().get("data", [])
    except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
        return {"errors": ["Assist request timeout"]}
    except Exception as e:
-        print("!! Issue getting Assist response")
-        print(str(e))
-        print("expected JSON, received:")
+        logger.error("!! Issue getting Assist response")
+        logger.exception(e)
+        logger.error("expected JSON, received:")
        try:
-            print(results.text)
+            logger.error(results.text)
        except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
        return {"errors": ["Something went wrong wile calling assist"]}
    for r in results:
        r["type"] = __change_keys(r["type"])
    return {"data": results}


def get_ice_servers():
    return config("iceServers") if config("iceServers", default=None) is not None \
                                   and len(config("iceServers")) > 0 else None


def __get_efs_path():
    efs_path = config("FS_DIR")
    if not path_exists(efs_path):
@@ -258,24 +242,24 @@ def session_exists(project_id, session_id):
        results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                               timeout=config("assistTimeout", cast=int, default=5))
        if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for session_exists")
-            print(results.text)
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
+            logger.error(results.text)
            return None
        results = results.json().get("data")
        if results is None:
            return False
        return True
    except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
        return False
    except Exception as e:
-        print("!! Issue getting Assist response")
-        print(str(e))
-        print("expected JSON, received:")
+        logger.error("!! Issue getting Assist response")
+        logger.exception(e)
+        logger.error("expected JSON, received:")
        try:
-            print(results.text)
+            logger.error(results.text)
        except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
        return False

@@ -20,7 +20,7 @@ def is_spot_token(token: str) -> bool:
        audience = decoded_token.get("aud")
        return audience == spot.AUDIENCE
    except jwt.InvalidTokenError:
-        logger.error(f"Invalid token: {token}")
+        logger.error(f"Invalid token for is_spot_token: {token}")
        raise

@@ -29,16 +29,15 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
        return None
    try:
        payload = jwt.decode(jwt=token,
-                             key=config("jwt_secret") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
-                             algorithms=config("jwt_algorithm"),
+                             key=config("JWT_SECRET") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
+                             algorithms=config("JWT_ALGORITHM"),
                             audience=get_supported_audience(),
                             leeway=leeway)
    except jwt.ExpiredSignatureError:
        logger.debug("! JWT Expired signature")
        return None
    except BaseException as e:
-        logger.warning("! JWT Base Exception")
-        logger.debug(e)
+        logger.warning("! JWT Base Exception", exc_info=e)
        return None
    return payload

@@ -49,15 +48,14 @@ def jwt_refresh_authorizer(scheme: str, token: str):
    try:
        payload = jwt.decode(jwt=token,
                             key=config("JWT_REFRESH_SECRET") if not is_spot_token(token) \
-                                 else config("JWT_SPOT_SECRET"),
-                             algorithms=config("jwt_algorithm"),
+                                 else config("JWT_SPOT_REFRESH_SECRET"),
+                             algorithms=config("JWT_ALGORITHM"),
                             audience=get_supported_audience())
    except jwt.ExpiredSignatureError:
        logger.debug("! JWT-refresh Expired signature")
        return None
    except BaseException as e:
-        logger.warning("! JWT-refresh Base Exception")
-        logger.debug(e)
+        logger.error("! JWT-refresh Base Exception", exc_info=e)
        return None
    return payload

@@ -73,8 +71,8 @@ def generate_jwt(user_id, tenant_id, iat, aud, for_spot=False):
            "iat": iat,
            "aud": aud
        },
-        key=config("jwt_secret") if not for_spot else config("JWT_SPOT_SECRET"),
-        algorithm=config("jwt_algorithm")
+        key=config("JWT_SECRET") if not for_spot else config("JWT_SPOT_SECRET"),
+        algorithm=config("JWT_ALGORITHM")
    )
    return token

@@ -92,7 +90,7 @@ def generate_jwt_refresh(user_id, tenant_id, iat, aud, jwt_jti, for_spot=False):
            "jti": jwt_jti
        },
        key=config("JWT_REFRESH_SECRET") if not for_spot else config("JWT_SPOT_REFRESH_SECRET"),
-        algorithm=config("jwt_algorithm")
+        algorithm=config("JWT_ALGORITHM")
    )
    return token

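Note on the hunks above: the settings keys move from lower-case (`jwt_secret`, `jwt_algorithm`) to upper-case (`JWT_SECRET`, `JWT_ALGORITHM`), and the refresh path now verifies spot tokens against `JWT_SPOT_REFRESH_SECRET` instead of `JWT_SPOT_SECRET`. A minimal PyJWT round-trip mirroring these calls; the secret, algorithm, and audience values below are placeholders, not the project's real configuration:

    import jwt  # PyJWT

    SECRET = "change-me"     # placeholder for config("JWT_SECRET")
    ALGORITHM = "HS256"      # placeholder for config("JWT_ALGORITHM")

    token = jwt.encode({"userId": 1, "aud": "front:OR"}, key=SECRET, algorithm=ALGORITHM)
    # `leeway` tolerates small clock skew on time-based claims; a wrong
    # audience or signature raises a jwt.InvalidTokenError subclass.
    payload = jwt.decode(jwt=token, key=SECRET, algorithms=[ALGORITHM],
                         audience="front:OR", leeway=5)
    print(payload["userId"])
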
@@ -61,11 +61,11 @@ def __get_autocomplete_table(value, project_id):
            try:
                cur.execute(query)
            except Exception as err:
-                print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
-                print(query.decode('UTF-8'))
-                print("--------- VALUE -----------")
-                print(value)
-                print("--------------------")
+                logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
+                logger.exception(query.decode('UTF-8'))
+                logger.exception("--------- VALUE -----------")
+                logger.exception(value)
+                logger.exception("--------------------")
                raise err
            results = cur.fetchall()
            for r in results:
@@ -85,7 +85,8 @@ def __generic_query(typename, value_length=None):
                    ORDER BY value"""

    if value_length is None or value_length > 2:
-        return f"""(SELECT DISTINCT value, type
+        return f"""SELECT DISTINCT ON(value,type) value, type
+                   ((SELECT DISTINCT value, type
                    FROM {TABLE}
                    WHERE
                      project_id = %(project_id)s
@@ -101,7 +102,7 @@ def __generic_query(typename, value_length=None):
                      AND type='{typename.upper()}'
                      AND value ILIKE %(value)s
                      ORDER BY value
-                     LIMIT 5);"""
+                     LIMIT 5)) AS raw;"""
    return f"""SELECT DISTINCT value, type
               FROM {TABLE}
               WHERE
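Note: these two hunks wrap the UNIONed LIMIT-5 subqueries of __generic_query() (the UNION branch itself sits between the hunks and is not shown in this excerpt) in an outer SELECT DISTINCT ON(value, type) ... AS raw, so a value matched by both branches is returned once. A minimal, self-contained illustration of the Postgres DISTINCT ON semantics being relied on; the DSN, table, and rows are hypothetical:

    # Hypothetical illustration, runnable against any Postgres via psycopg2.
    import psycopg2

    conn = psycopg2.connect("dbname=playground")  # placeholder DSN
    with conn, conn.cursor() as cur:
        # DISTINCT ON (value, type) keeps one row per (value, type) pair;
        # without a matching ORDER BY, which duplicate survives is arbitrary,
        # which is fine here because the duplicates are identical rows.
        cur.execute("""SELECT DISTINCT ON (value, type) value, type
                       FROM ((SELECT 'login' AS value, 'CLICK' AS type)
                             UNION ALL
                             (SELECT 'login', 'CLICK')) AS raw;""")
        print(cur.fetchall())  # -> [('login', 'CLICK')]
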
@@ -124,7 +125,7 @@ def __generic_autocomplete(event: Event):
    return f


-def __generic_autocomplete_metas(typename):
+def generic_autocomplete_metas(typename):
    def f(project_id, text):
        with pg_client.PostgresClient() as cur:
            params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
@@ -326,7 +327,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                            AND {colname} ILIKE %(svalue)s LIMIT 5)""")
    with pg_client.PostgresClient() as cur:
        cur.execute(cur.mogrify(f"""\
-                SELECT key, value, 'METADATA' AS TYPE
+                SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
                FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                              "svalue": helper.string_to_sql_like("^" + value)}))

@@ -1,7 +1,8 @@
-from chalicelib.utils import pg_client
-from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
-
+from chalicelib.core import projects
from chalicelib.core import users
+from chalicelib.core.log_tools import datadog, stackdriver, sentry
+from chalicelib.core.modules import TENANT_CONDITION
+from chalicelib.utils import pg_client


def get_state(tenant_id):
@@ -12,47 +13,61 @@ def get_state(tenant_id):

        if len(pids) > 0:
            cur.execute(
-                cur.mogrify("""SELECT EXISTS(( SELECT 1
+                cur.mogrify(
+                    """SELECT EXISTS(( SELECT 1
                                FROM public.sessions AS s
                                WHERE s.project_id IN %(ids)s)) AS exists;""",
-                            {"ids": tuple(pids)})
+                    {"ids": tuple(pids)},
+                )
+            )
            recorded = cur.fetchone()["exists"]
            meta = False
            if recorded:
-                cur.execute("""SELECT EXISTS((SELECT 1
+                query = cur.mogrify(
+                    f"""SELECT EXISTS((SELECT 1
                               FROM public.projects AS p
                                        LEFT JOIN LATERAL ( SELECT 1
                                                            FROM public.sessions
                                                            WHERE sessions.project_id = p.project_id
                                                              AND sessions.user_id IS NOT NULL
                                                            LIMIT 1) AS sessions(user_id) ON (TRUE)
-                               WHERE p.deleted_at ISNULL
+                               WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
                                 AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                                     OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                                     OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                                     OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                                     OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                                     OR p.metadata_10 IS NOT NULL )
-                               )) AS exists;""")
+                               )) AS exists;""",
+                    {"tenant_id": tenant_id},
+                )
+                cur.execute(query)

                meta = cur.fetchone()["exists"]

    return [
-        {"task": "Install OpenReplay",
-         "done": recorded,
-         "URL": "https://docs.openreplay.com/getting-started/quick-start"},
-        {"task": "Identify Users",
-         "done": meta,
-         "URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
-        {"task": "Invite Team Members",
-         "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-         "URL": "https://app.openreplay.com/client/manage-users"},
-        {"task": "Integrations",
-         "done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
-                 or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
-                 or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
-         "URL": "https://docs.openreplay.com/integrations"}
+        {
+            "task": "Install OpenReplay",
+            "done": recorded,
+            "URL": "https://docs.openreplay.com/getting-started/quick-start",
+        },
+        {
+            "task": "Identify Users",
+            "done": meta,
+            "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
+        },
+        {
+            "task": "Invite Team Members",
+            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+            "URL": "https://app.openreplay.com/client/manage-users",
+        },
+        {
+            "task": "Integrations",
+            "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
+                    or len(sentry.get_all(tenant_id=tenant_id)) > 0
+                    or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+            "URL": "https://docs.openreplay.com/integrations",
+        },
    ]

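Note: the onboarding queries now interpolate TENANT_CONDITION into the WHERE clause and bind tenant_id as a parameter, so one SQL text serves single- and multi-tenant builds. A hedged sketch of the mogrify-then-execute pattern used here; the condition string below is a stand-in, not the real TENANT_CONDITION value from chalicelib.core.modules, and a dict-style cursor (as pg_client provides) is assumed:

    # Hypothetical illustration of the two-step render/execute pattern.
    TENANT_CONDITION = "p.tenant_id = %(tenant_id)s"  # stand-in only

    def tenant_has_projects(cur, tenant_id):
        query = cur.mogrify(
            f"""SELECT EXISTS((SELECT 1
                               FROM public.projects AS p
                               WHERE {TENANT_CONDITION}
                                 AND p.deleted_at ISNULL)) AS exists;""",
            {"tenant_id": tenant_id},
        )
        # mogrify() returns the fully rendered SQL, which can be logged before
        # execution; execute() then runs exactly what was rendered.
        cur.execute(query)
        return cur.fetchone()["exists"]
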
@@ -63,52 +78,66 @@ def get_state_installing(tenant_id):

        if len(pids) > 0:
            cur.execute(
-                cur.mogrify("""SELECT EXISTS(( SELECT 1
+                cur.mogrify(
+                    """SELECT EXISTS(( SELECT 1
                                FROM public.sessions AS s
                                WHERE s.project_id IN %(ids)s)) AS exists;""",
-                            {"ids": tuple(pids)})
+                    {"ids": tuple(pids)},
+                )
+            )
            recorded = cur.fetchone()["exists"]

-    return {"task": "Install OpenReplay",
-            "done": recorded,
-            "URL": "https://docs.openreplay.com/getting-started/quick-start"}
+    return {
+        "task": "Install OpenReplay",
+        "done": recorded,
+        "URL": "https://docs.openreplay.com/getting-started/quick-start",
+    }


def get_state_identify_users(tenant_id):
    with pg_client.PostgresClient() as cur:
-        cur.execute("""SELECT EXISTS((SELECT 1
+        query = cur.mogrify(
+            f"""SELECT EXISTS((SELECT 1
                       FROM public.projects AS p
                                LEFT JOIN LATERAL ( SELECT 1
                                                    FROM public.sessions
                                                    WHERE sessions.project_id = p.project_id
                                                      AND sessions.user_id IS NOT NULL
                                                    LIMIT 1) AS sessions(user_id) ON (TRUE)
-                       WHERE p.deleted_at ISNULL
+                       WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
                         AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                             OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                             OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                             OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                             OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                             OR p.metadata_10 IS NOT NULL )
-                       )) AS exists;""")
+                       )) AS exists;""",
+            {"tenant_id": tenant_id},
+        )
+        cur.execute(query)

        meta = cur.fetchone()["exists"]

-    return {"task": "Identify Users",
-            "done": meta,
-            "URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
+    return {
+        "task": "Identify Users",
+        "done": meta,
+        "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
+    }


def get_state_manage_users(tenant_id):
-    return {"task": "Invite Team Members",
-            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-            "URL": "https://app.openreplay.com/client/manage-users"}
+    return {
+        "task": "Invite Team Members",
+        "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+        "URL": "https://app.openreplay.com/client/manage-users",
+    }


def get_state_integrations(tenant_id):
-    return {"task": "Integrations",
-            "done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
-                    or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
-                    or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
-            "URL": "https://docs.openreplay.com/integrations"}
+    return {
+        "task": "Integrations",
+        "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
+                or len(sentry.get_all(tenant_id=tenant_id)) > 0
+                or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+        "URL": "https://docs.openreplay.com/integrations",
+    }

@@ -1,4 +1,4 @@
-from chalicelib.utils import pg_client, helper
+from chalicelib.utils import pg_client
from chalicelib.utils.storage import StorageClient
from decouple import config

api/chalicelib/core/collaborations/__init__.py (new file, +1)
@@ -0,0 +1 @@
+from . import collaboration_base as _

@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

import schemas
from chalicelib.core import webhook
-from chalicelib.core.collaboration_base import BaseCollaboration
+from chalicelib.core.collaborations.collaboration_base import BaseCollaboration

logger = logging.getLogger(__name__)

@@ -26,17 +26,23 @@ class MSTeams(BaseCollaboration):

    @classmethod
    def say_hello(cls, url):
-        r = requests.post(
-            url=url,
-            json={
-                "@type": "MessageCard",
-                "@context": "https://schema.org/extensions",
-                "summary": "Welcome to OpenReplay",
-                "title": "Welcome to OpenReplay"
-            })
-        if r.status_code != 200:
-            logger.warning("MSTeams integration failed")
-            logger.warning(r.text)
+        try:
+            r = requests.post(
+                url=url,
+                json={
+                    "@type": "MessageCard",
+                    "@context": "https://schema.org/extensions",
+                    "summary": "Welcome to OpenReplay",
+                    "title": "Welcome to OpenReplay"
+                },
+                timeout=3)
+            if r.status_code != 200:
+                logger.warning("MSTeams integration failed")
+                logger.warning(r.text)
+                return False
+        except Exception as e:
+            logger.warning("!!! MSTeams integration failed")
+            logger.exception(e)
+            return False
        return True

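Note: say_hello() now reports failure instead of always returning True: a non-200 response or any exception (including the new 3-second timeout) yields False, which a caller can surface when a webhook URL is registered. A hypothetical usage sketch; add_channel and its error message are stand-ins, not necessarily the real caller:

    from fastapi import HTTPException, status

    def add_channel(url: str):
        # Hypothetical caller: reject the webhook if the greeting message
        # cannot be delivered (bad URL, Teams outage, or >3s timeout).
        if not MSTeams.say_hello(url):
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail="Couldn't send a test message to MSTeams.")
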
@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

import schemas
from chalicelib.core import webhook
-from chalicelib.core.collaboration_base import BaseCollaboration
+from chalicelib.core.collaborations.collaboration_base import BaseCollaboration


class Slack(BaseCollaboration):

@@ -1,59 +0,0 @@
-import logging
-from typing import Union
-
-import schemas
-from chalicelib.core import metrics
-
-logger = logging.getLogger(__name__)
-
-
-def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors, \
-               schemas.MetricOfPerformance, schemas.MetricOfResources], project_id: int, data: dict):
-    supported = {schemas.MetricOfWebVitals.COUNT_SESSIONS: metrics.get_processed_sessions,
-                 schemas.MetricOfWebVitals.AVG_IMAGE_LOAD_TIME: metrics.get_application_activity_avg_image_load_time,
-                 schemas.MetricOfWebVitals.AVG_PAGE_LOAD_TIME: metrics.get_application_activity_avg_page_load_time,
-                 schemas.MetricOfWebVitals.AVG_REQUEST_LOAD_TIME: metrics.get_application_activity_avg_request_load_time,
-                 schemas.MetricOfWebVitals.AVG_DOM_CONTENT_LOAD_START: metrics.get_page_metrics_avg_dom_content_load_start,
-                 schemas.MetricOfWebVitals.AVG_FIRST_CONTENTFUL_PIXEL: metrics.get_page_metrics_avg_first_contentful_pixel,
-                 schemas.MetricOfWebVitals.AVG_VISITED_PAGES: metrics.get_user_activity_avg_visited_pages,
-                 schemas.MetricOfWebVitals.AVG_SESSION_DURATION: metrics.get_user_activity_avg_session_duration,
-                 schemas.MetricOfWebVitals.AVG_PAGES_DOM_BUILDTIME: metrics.get_pages_dom_build_time,
-                 schemas.MetricOfWebVitals.AVG_PAGES_RESPONSE_TIME: metrics.get_pages_response_time,
-                 schemas.MetricOfWebVitals.AVG_RESPONSE_TIME: metrics.get_top_metrics_avg_response_time,
-                 schemas.MetricOfWebVitals.AVG_FIRST_PAINT: metrics.get_top_metrics_avg_first_paint,
-                 schemas.MetricOfWebVitals.AVG_DOM_CONTENT_LOADED: metrics.get_top_metrics_avg_dom_content_loaded,
-                 schemas.MetricOfWebVitals.AVG_TILL_FIRST_BYTE: metrics.get_top_metrics_avg_till_first_bit,
-                 schemas.MetricOfWebVitals.AVG_TIME_TO_INTERACTIVE: metrics.get_top_metrics_avg_time_to_interactive,
-                 schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests,
-                 schemas.MetricOfWebVitals.AVG_TIME_TO_RENDER: metrics.get_time_to_render,
-                 schemas.MetricOfWebVitals.AVG_USED_JS_HEAP_SIZE: metrics.get_memory_consumption,
-                 schemas.MetricOfWebVitals.AVG_CPU: metrics.get_avg_cpu,
-                 schemas.MetricOfWebVitals.AVG_FPS: metrics.get_avg_fps,
-                 schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors,
-                 schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: metrics.get_domains_errors_4xx,
-                 schemas.MetricOfErrors.DOMAINS_ERRORS_5XX: metrics.get_domains_errors_5xx,
-                 schemas.MetricOfErrors.ERRORS_PER_DOMAINS: metrics.get_errors_per_domains,
-                 schemas.MetricOfErrors.CALLS_ERRORS: metrics.get_calls_errors,
-                 schemas.MetricOfErrors.ERRORS_PER_TYPE: metrics.get_errors_per_type,
-                 schemas.MetricOfErrors.RESOURCES_BY_PARTY: metrics.get_resources_by_party,
-                 schemas.MetricOfPerformance.SPEED_LOCATION: metrics.get_speed_index_location,
-                 schemas.MetricOfPerformance.SLOWEST_DOMAINS: metrics.get_slowest_domains,
-                 schemas.MetricOfPerformance.SESSIONS_PER_BROWSER: metrics.get_sessions_per_browser,
-                 schemas.MetricOfPerformance.TIME_TO_RENDER: metrics.get_time_to_render,
-                 schemas.MetricOfPerformance.IMPACTED_SESSIONS_BY_SLOW_PAGES: metrics.get_impacted_sessions_by_slow_pages,
-                 schemas.MetricOfPerformance.MEMORY_CONSUMPTION: metrics.get_memory_consumption,
-                 schemas.MetricOfPerformance.CPU: metrics.get_avg_cpu,
-                 schemas.MetricOfPerformance.FPS: metrics.get_avg_fps,
-                 schemas.MetricOfPerformance.CRASHES: metrics.get_crashes,
-                 schemas.MetricOfPerformance.RESOURCES_VS_VISUALLY_COMPLETE: metrics.get_resources_vs_visually_complete,
-                 schemas.MetricOfPerformance.PAGES_DOM_BUILDTIME: metrics.get_pages_dom_build_time,
-                 schemas.MetricOfPerformance.PAGES_RESPONSE_TIME: metrics.get_pages_response_time,
-                 schemas.MetricOfPerformance.PAGES_RESPONSE_TIME_DISTRIBUTION: metrics.get_pages_response_time_distribution,
-                 schemas.MetricOfResources.MISSING_RESOURCES: metrics.get_missing_resources_trend,
-                 schemas.MetricOfResources.SLOWEST_RESOURCES: metrics.get_slowest_resources,
-                 schemas.MetricOfResources.RESOURCES_LOADING_TIME: metrics.get_resources_loading_time,
-                 schemas.MetricOfResources.RESOURCE_TYPE_VS_RESPONSE_END: metrics.resource_type_vs_response_end,
-                 schemas.MetricOfResources.RESOURCES_COUNT_BY_TYPE: metrics.get_resources_count_by_type,
-                 schemas.MetricOfWebVitals.COUNT_USERS: metrics.get_unique_users, }
-
-    return supported.get(key, lambda *args: None)(project_id=project_id, **data)

@@ -1,5 +1,8 @@
import logging
-from chalicelib.utils import helper, pg_client
+
+from chalicelib.utils import pg_client
+
+logger = logging.getLogger(__name__)


class DatabaseRequestHandler:

@ -1,714 +0,0 @@
|
|||
import json
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import sourcemaps, sessions
|
||||
from chalicelib.utils import errors_helper
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import __get_step_size
|
||||
|
||||
|
||||
def get(error_id, family=False):
|
||||
if family:
|
||||
return get_batch([error_id])
|
||||
with pg_client.PostgresClient() as cur:
|
||||
# trying: return only 1 error, without event details
|
||||
query = cur.mogrify(
|
||||
# "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
|
||||
"SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
|
||||
{"error_id": error_id})
|
||||
cur.execute(query=query)
|
||||
result = cur.fetchone()
|
||||
if result is not None:
|
||||
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
||||
def get_batch(error_ids):
|
||||
if len(error_ids) == 0:
|
||||
return []
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
"""
|
||||
WITH RECURSIVE error_family AS (
|
||||
SELECT *
|
||||
FROM public.errors
|
||||
WHERE error_id IN %(error_ids)s
|
||||
UNION
|
||||
SELECT child_errors.*
|
||||
FROM public.errors AS child_errors
|
||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
||||
)
|
||||
SELECT *
|
||||
FROM error_family;""",
|
||||
{"error_ids": tuple(error_ids)})
|
||||
cur.execute(query=query)
|
||||
errors = cur.fetchall()
|
||||
for e in errors:
|
||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
||||
return helper.list_to_camel_case(errors)
|
||||
|
||||
|
||||
def __flatten_sort_key_count_version(data, merge_nested=False):
|
||||
if data is None:
|
||||
return []
|
||||
return sorted(
|
||||
[
|
||||
{
|
||||
"name": f'{o["name"]}@{v["version"]}',
|
||||
"count": v["count"]
|
||||
} for o in data for v in o["partition"]
|
||||
],
|
||||
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
||||
[
|
||||
{
|
||||
"name": o["name"],
|
||||
"count": o["count"],
|
||||
} for o in data
|
||||
]
|
||||
|
||||
|
||||
def __process_tags(row):
|
||||
return [
|
||||
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
||||
{"name": "browser.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
||||
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
||||
{"name": "OS.ver",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
||||
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
||||
{"name": "device",
|
||||
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
||||
{"name": "country", "partitions": row.pop("country_partition")}
|
||||
]
|
||||
|
||||
|
||||
def get_details(project_id, error_id, user_id, **data):
|
||||
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
||||
pg_sub_query24.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="sessions.project_id")
|
||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="errors.project_id")
|
||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err.append("source ='js_exception'")
|
||||
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
||||
pg_sub_query30.append("error_id = %(error_id)s")
|
||||
pg_basic_query = __get_basic_constraints(time_constraint=False)
|
||||
pg_basic_query.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
data["startDate24"] = TimeUTC.now(-1)
|
||||
data["endDate24"] = TimeUTC.now()
|
||||
data["startDate30"] = TimeUTC.now(-30)
|
||||
data["endDate30"] = TimeUTC.now()
|
||||
density24 = int(data.get("density24", 24))
|
||||
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
||||
density30 = int(data.get("density30", 30))
|
||||
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
||||
params = {
|
||||
"startDate24": data['startDate24'],
|
||||
"endDate24": data['endDate24'],
|
||||
"startDate30": data['startDate30'],
|
||||
"endDate30": data['endDate30'],
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size24": step_size24,
|
||||
"step_size30": step_size30,
|
||||
"error_id": error_id}
|
||||
|
||||
main_pg_query = f"""\
|
||||
SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
last_session_id,
|
||||
browsers_partition,
|
||||
os_partition,
|
||||
device_partition,
|
||||
country_partition,
|
||||
chart24,
|
||||
chart30,
|
||||
custom_tags
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions
|
||||
FROM public.errors
|
||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||
GROUP BY error_id, name, message) AS details
|
||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
||||
INNER JOIN (SELECT session_id AS last_session_id,
|
||||
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||
FROM events.errors
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||
FROM errors_tags
|
||||
WHERE errors_tags.error_id = %(error_id)s
|
||||
AND errors_tags.session_id = errors.session_id
|
||||
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||
WHERE error_id = %(error_id)s
|
||||
ORDER BY errors.timestamp DESC
|
||||
LIMIT 1) AS last_session_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS version_details
|
||||
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_details
|
||||
GROUP BY count_per_os_details.name ) AS os_version_details
|
||||
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||
FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device
|
||||
ORDER BY count DESC) AS count_per_device_v_details
|
||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query24)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||
ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||
"""
|
||||
|
||||
# print("--------------------")
|
||||
# print(cur.mogrify(main_pg_query, params))
|
||||
# print("--------------------")
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
|
||||
query = cur.mogrify(
|
||||
f"""SELECT error_id, status, session_id, start_ts,
|
||||
parent_error_id,session_id, user_anonymous_id,
|
||||
user_id, user_uuid, user_browser, user_browser_version,
|
||||
user_os, user_os_version, user_device, payload,
|
||||
FALSE AS favorite,
|
||||
True AS viewed
|
||||
FROM public.errors AS pe
|
||||
INNER JOIN events.errors AS ee USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE pe.project_id = %(project_id)s
|
||||
AND error_id = %(error_id)s
|
||||
ORDER BY start_ts DESC
|
||||
LIMIT 1;""",
|
||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||
cur.execute(query=query)
|
||||
status = cur.fetchone()
|
||||
|
||||
if status is not None:
|
||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||
row["status"] = status.pop("status")
|
||||
row["parent_error_id"] = status.pop("parent_error_id")
|
||||
row["favorite"] = status.pop("favorite")
|
||||
row["viewed"] = status.pop("viewed")
|
||||
row["last_hydrated_session"] = status
|
||||
else:
|
||||
row["stack"] = []
|
||||
row["last_hydrated_session"] = None
|
||||
row["status"] = "untracked"
|
||||
row["parent_error_id"] = None
|
||||
row["favorite"] = False
|
||||
row["viewed"] = False
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
||||
|
||||
def get_details_chart(project_id, error_id, user_id, **data):
|
||||
pg_sub_query = __get_basic_constraints()
|
||||
pg_sub_query.append("error_id = %(error_id)s")
|
||||
pg_sub_query_chart = __get_basic_constraints(time_constraint=False, chart=True)
|
||||
pg_sub_query_chart.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
if data.get("startDate") is None:
|
||||
data["startDate"] = TimeUTC.now(-7)
|
||||
else:
|
||||
data["startDate"] = int(data["startDate"])
|
||||
if data.get("endDate") is None:
|
||||
data["endDate"] = TimeUTC.now()
|
||||
else:
|
||||
data["endDate"] = int(data["endDate"])
|
||||
density = int(data.get("density", 7))
|
||||
step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
|
||||
params = {
|
||||
"startDate": data['startDate'],
|
||||
"endDate": data['endDate'],
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size,
|
||||
"error_id": error_id}
|
||||
|
||||
main_pg_query = f"""\
|
||||
SELECT %(error_id)s AS error_id,
|
||||
browsers_partition,
|
||||
os_partition,
|
||||
device_partition,
|
||||
country_partition,
|
||||
chart
|
||||
FROM (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS count_per_version_details) AS browesr_version_details
|
||||
ON (TRUE)) AS browser_details) AS browser_details
|
||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_query) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_query
|
||||
) AS os_version_query ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_details) AS partition
|
||||
FROM (SELECT CASE
|
||||
WHEN user_device = '' OR user_device ISNULL
|
||||
THEN 'unknown'
|
||||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
AND user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device_type, user_device
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
) AS device_version_details ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS chart_details ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||
|
||||
cur.execute(cur.mogrify(main_pg_query, params))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return {"errors": ["error not found"]}
|
||||
row["tags"] = __process_tags(row)
|
||||
return {"data": helper.dict_to_camel_case(row)}
|
||||
|
||||
|
||||
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
|
||||
project_key="project_id"):
|
||||
if project_key is None:
|
||||
ch_sub_query = []
|
||||
else:
|
||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
||||
f"timestamp < %({endTime_arg_name})s"]
|
||||
if chart:
|
||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
||||
if platform == schemas.PlatformType.MOBILE:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.DESKTOP:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||
schemas.ErrorSort.USERS_COUNT: "users",
|
||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
||||
}.get(key, 'max_datetime')
|
||||
|
||||
|
||||
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
|
||||
empty_response = {
|
||||
'total': 0,
|
||||
'errors': []
|
||||
}
|
||||
|
||||
platform = None
|
||||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||
"pe.project_id=%(project_id)s"]
|
||||
# To ignore Script error
|
||||
pg_sub_query.append("pe.message!='Script error.'")
|
||||
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
|
||||
if platform:
|
||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||
statuses = []
|
||||
error_ids = None
|
||||
if data.startTimestamp is None:
|
||||
data.startTimestamp = TimeUTC.now(-30)
|
||||
if data.endTimestamp is None:
|
||||
data.endTimestamp = TimeUTC.now(1)
|
||||
if len(data.events) > 0 or len(data.filters) > 0:
|
||||
print("-- searching for sessions before errors")
|
||||
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
|
||||
error_status=data.status)
|
||||
if len(statuses) == 0:
|
||||
return empty_response
|
||||
error_ids = [e["errorId"] for e in statuses]
|
||||
with pg_client.PostgresClient() as cur:
|
||||
step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
||||
sort = __get_sort_key('datetime')
|
||||
if data.sort is not None:
|
||||
sort = __get_sort_key(data.sort)
|
||||
order = schemas.SortOrderType.DESC
|
||||
if data.order is not None:
|
||||
order = data.order
|
||||
extra_join = ""
|
||||
|
||||
params = {
|
||||
"startDate": data.startTimestamp,
|
||||
"endDate": data.endTimestamp,
|
||||
"project_id": project_id,
|
||||
"userId": user_id,
|
||||
"step_size": step_size}
|
||||
if data.status != schemas.ErrorStatus.ALL:
|
||||
pg_sub_query.append("status = %(error_status)s")
|
||||
params["error_status"] = data.status
|
||||
if data.limit is not None and data.page is not None:
|
||||
params["errors_offset"] = (data.page - 1) * data.limit
|
||||
params["errors_limit"] = data.limit
|
||||
else:
|
||||
params["errors_offset"] = 0
|
||||
params["errors_limit"] = 200
|
||||
|
||||
if error_ids is not None:
|
||||
params["error_ids"] = tuple(error_ids)
|
||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||
# if data.bookmarked:
|
||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
|
||||
main_pg_query = f"""SELECT full_count,
|
||||
error_id,
|
||||
name,
|
||||
message,
|
||||
users,
|
||||
sessions,
|
||||
last_occurrence,
|
||||
first_occurrence,
|
||||
chart
|
||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions,
|
||||
MAX(timestamp) AS max_datetime,
|
||||
MIN(timestamp) AS min_datetime
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS pe USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
{extra_join}
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}) AS details
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||
) AS details
|
||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COUNT(session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||
FROM events.errors
|
||||
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
        # print("--------------------")
        # print(cur.mogrify(main_pg_query, params))
        # print("--------------------")

        cur.execute(cur.mogrify(main_pg_query, params))
        rows = cur.fetchall()
        total = 0 if len(rows) == 0 else rows[0]["full_count"]

        if total == 0:
            rows = []
        else:
            if len(statuses) == 0:
                query = cur.mogrify(
                    """SELECT error_id,
                              COALESCE((SELECT TRUE
                                        FROM public.user_viewed_errors AS ve
                                        WHERE errors.error_id = ve.error_id
                                          AND ve.user_id = %(user_id)s
                                        LIMIT 1), FALSE) AS viewed
                       FROM public.errors
                       WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
                    {"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
                     "user_id": user_id})
                cur.execute(query=query)
                statuses = helper.list_to_camel_case(cur.fetchall())
                statuses = {s["errorId"]: s for s in statuses}

        for r in rows:
            r.pop("full_count")
            if r["error_id"] in statuses:
                r["viewed"] = statuses[r["error_id"]]["viewed"]
            else:
                r["viewed"] = False

    return {
        'total': total,
        'errors': helper.list_to_camel_case(rows)
    }


def __save_stacktrace(error_id, data):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
               WHERE error_id = %(error_id)s;""",
            {"error_id": error_id, "data": json.dumps(data)})
        cur.execute(query=query)


def get_trace(project_id, error_id):
    error = get(error_id=error_id, family=False)
    if error is None:
        return {"errors": ["error not found"]}
    if error.get("source", "") != "js_exception":
        return {"errors": ["this source of errors doesn't have a sourcemap"]}
    if error.get("payload") is None:
        return {"errors": ["null payload"]}
    if error.get("stacktrace") is not None:
        return {"sourcemapUploaded": True,
                "trace": error.get("stacktrace"),
                "preparsed": True}
    trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
    if all_exists:
        __save_stacktrace(error_id=error_id, data=trace)
    return {"sourcemapUploaded": all_exists,
            "trace": trace,
            "preparsed": False}
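# Editor's note — a summary sketch, not part of the original file: get_trace() has four
# outcomes, all derived from the checks above:
#   {"errors": [...]}                                     -> not found / wrong source / no payload
#   {"sourcemapUploaded": True,  "preparsed": True, ...}  -> stacktrace already parsed and cached
#   {"sourcemapUploaded": True,  "preparsed": False, ...} -> parsed now, cached via __save_stacktrace
#   {"sourcemapUploaded": False, "preparsed": False, ...} -> some sourcemaps missing; best-effort trace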
def get_sessions(start_date, end_date, project_id, user_id, error_id):
    extra_constraints = ["s.project_id = %(project_id)s",
                         "s.start_ts >= %(startDate)s",
                         "s.start_ts <= %(endDate)s",
                         "e.error_id = %(error_id)s"]
    if start_date is None:
        start_date = TimeUTC.now(-7)
    if end_date is None:
        end_date = TimeUTC.now()

    params = {
        "startDate": start_date,
        "endDate": end_date,
        "project_id": project_id,
        "userId": user_id,
        "error_id": error_id}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT s.project_id,
                       s.session_id::text AS session_id,
                       s.user_uuid,
                       s.user_id,
                       s.user_agent,
                       s.user_os,
                       s.user_browser,
                       s.user_device,
                       s.user_country,
                       s.start_ts,
                       s.duration,
                       s.events_count,
                       s.pages_count,
                       s.errors_count,
                       s.issue_types,
                       COALESCE((SELECT TRUE
                                 FROM public.user_favorite_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
                       COALESCE((SELECT TRUE
                                 FROM public.user_viewed_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
                FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
                WHERE {" AND ".join(extra_constraints)}
                ORDER BY s.start_ts DESC;""",
            params)
        cur.execute(query=query)
        sessions_list = []
        total = cur.rowcount
        row = cur.fetchone()
        while row is not None and len(sessions_list) < 100:
            sessions_list.append(row)
            row = cur.fetchone()

    return {
        'total': total,
        'sessions': helper.list_to_camel_case(sessions_list)
    }


ACTION_STATE = {
    "unsolve": 'unresolved',
    "solve": 'resolved',
    "ignore": 'ignored'
}


def change_state(project_id, user_id, error_id, action):
    errors = get(error_id, family=True)
    print(len(errors))
    status = ACTION_STATE.get(action)
    if errors is None or len(errors) == 0:
        return {"errors": ["error not found"]}
    if errors[0]["status"] == status:
        return {"errors": [f"error is already {status}"]}

    if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
        return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}

    params = {
        "userId": user_id,
        "error_ids": tuple([e["errorId"] for e in errors]),
        "status": status}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET status = %(status)s
               WHERE error_id IN %(error_ids)s
               RETURNING status""",
            params)
        cur.execute(query=query)
        row = cur.fetchone()
    if row is not None:
        for e in errors:
            e["status"] = row["status"]
    return {"data": errors}
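# Editor's note — a minimal sketch of the state machine above, not part of the original
# file. Actions map to stored statuses, and one transition is rejected:
#   change_state(..., action="solve")   -> status 'resolved'
#   change_state(..., action="ignore")  -> status 'ignored', unless the current status is
#                                          'resolved' (resolved -> ignored is blocked)
#   change_state(..., action="unsolve") -> status 'unresolved'
# The update applies to the whole error family returned by get(error_id, family=True).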
api/chalicelib/core/errors/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

from . import errors_pg as errors_legacy

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    logger.info(">>> Using experimental error search")
    from . import errors_ch as errors
else:
    from . import errors_pg as errors
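# Editor's note — an illustrative sketch, not part of the original file: downstream code
# imports this package and gets whichever backend the EXP_ERRORS_SEARCH flag selected:
#   from chalicelib.core.errors import errors          # errors_ch or errors_pg
#   from chalicelib.core.errors import errors_legacy   # always errors_pg
#   errors.search(data=..., project=..., user_id=...)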
api/chalicelib/core/errors/errors_ch.py (new file, 409 lines)
@@ -0,0 +1,409 @@
import schemas
from chalicelib.core import metadata
from chalicelib.core.errors import errors_legacy
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC


def _multiple_values(values, value_key="value"):
    query_values = {}
    if values is not None and isinstance(values, list):
        for i in range(len(values)):
            k = f"{value_key}_{i}"
            query_values[k] = values[i]
    return query_values


def __get_sql_operator(op: schemas.SearchEventOperator):
    return {
        schemas.SearchEventOperator.IS: "=",
        schemas.SearchEventOperator.IS_ANY: "IN",
        schemas.SearchEventOperator.ON: "=",
        schemas.SearchEventOperator.ON_ANY: "IN",
        schemas.SearchEventOperator.IS_NOT: "!=",
        schemas.SearchEventOperator.NOT_ON: "!=",
        schemas.SearchEventOperator.CONTAINS: "ILIKE",
        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
    }.get(op, "=")


def _isAny_opreator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]


def _isUndefined_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_UNDEFINED]


def __is_negation_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_NOT,
                  schemas.SearchEventOperator.NOT_ON,
                  schemas.SearchEventOperator.NOT_CONTAINS]


def _multiple_conditions(condition, values, value_key="value", is_not=False):
    query = []
    for i in range(len(values)):
        k = f"{value_key}_{i}"
        query.append(condition.replace(value_key, k))
    return "(" + (" AND " if is_not else " OR ").join(query) + ")"
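# Editor's note — a minimal usage sketch, not part of the original file, assuming two
# search values; it shows how the two helpers above expand one templated condition:
#   _multiple_values(["foo", "bar"], value_key="f_value0")
#   # -> {"f_value0_0": "foo", "f_value0_1": "bar"}
#   _multiple_conditions("s.user_id = %(f_value0)s", ["foo", "bar"], value_key="f_value0")
#   # -> "(s.user_id = %(f_value0_0)s OR s.user_id = %(f_value0_1)s)"
# With is_not=True the parts are joined with AND instead, so negated operators must
# hold for every value rather than any value.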
def get(error_id, family=False):
    return errors_legacy.get(error_id=error_id, family=family)


def get_batch(error_ids):
    return errors_legacy.get_batch(error_ids=error_ids)


def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
                                   endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                                   table_name=None):
    ch_sub_query = [f"{project_key} = toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
    # if platform == schemas.PlatformType.MOBILE:
    #     ch_sub_query.append("user_device_type = 'mobile'")
    # elif platform == schemas.PlatformType.DESKTOP:
    #     ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query
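# Editor's note — an illustrative sketch, not part of the original file: with the
# defaults, __get_basic_constraints_events() yields a list such as
#   ["project_id = toUInt16(%(project_id)s)",
#    "`$event_name`='ERROR'",
#    "created_at >= toDateTime(%(startDate)s/1000)",
#    "created_at < toDateTime(%(endDate)s/1000)"]
# which callers join with " AND " into a ClickHouse WHERE clause.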
def __get_sort_key(key):
    return {
        schemas.ErrorSort.OCCURRENCE: "max_datetime",
        schemas.ErrorSort.USERS_COUNT: "users",
        schemas.ErrorSort.SESSIONS_COUNT: "sessions"
    }.get(key, 'max_datetime')


def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
    MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
    MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)

    platform = None
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
            platform = f.value[0]
    ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
    # ignore platform for the errors table
    ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
    ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")

    # to ignore "Script error."
    ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
    error_ids = None

    if data.startTimestamp is None:
        data.startTimestamp = TimeUTC.now(-7)
    if data.endTimestamp is None:
        data.endTimestamp = TimeUTC.now(1)

    subquery_part = ""
    params = {}
    if len(data.events) > 0:
        errors_condition_count = 0
        for i, e in enumerate(data.events):
            if e.type == schemas.EventType.ERROR:
                errors_condition_count += 1
                is_any = _isAny_opreator(e.operator)
                op = __get_sql_operator(e.operator)
                e_k = f"e_value{i}"
                params = {**params, **_multiple_values(e.value, value_key=e_k)}
                if not is_any and len(e.value) > 0 and e.value[0] not in [None, "*", ""]:
                    ch_sub_query.append(
                        _multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
                                             e.value, value_key=e_k))
        if len(data.events) > errors_condition_count:
            subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
                                                                               errors_only=True,
                                                                               project_id=project.project_id,
                                                                               user_id=user_id,
                                                                               issue=None,
                                                                               favorite_only=False)
            subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
            params = {**params, **subquery_part_args}
    if len(data.filters) > 0:
        meta_keys = None
        # include a sessions sub-query inside the events query, in order to reduce the selected data
        for i, f in enumerate(data.filters):
            if not isinstance(f.value, list):
                f.value = [f.value]
            filter_type = f.type
            f.value = helper.values_for_operator(value=f.value, op=f.operator)
            f_k = f"f_value{i}"
            params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
            op = __get_sql_operator(f.operator) \
                if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
            is_any = _isAny_opreator(f.operator)
            is_undefined = _isUndefined_operator(f.operator)
            if not is_any and not is_undefined and len(f.value) == 0:
                continue
            is_not = False
            if __is_negation_operator(f.operator):
                is_not = True
            if filter_type == schemas.FilterType.USER_BROWSER:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_browser)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_os)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_device)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_country)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.UTM_SOURCE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_source)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_source)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_medium)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))
            elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.utm_campaign)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type == schemas.FilterType.DURATION:
                if len(f.value) > 0 and f.value[0] is not None:
                    ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
                    params["minDuration"] = f.value[0]
                if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
                    ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
                    params["maxDuration"] = f.value[1]

            elif filter_type == schemas.FilterType.REFERRER:
                # extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
                if is_any:
                    referrer_constraint = 'isNotNull(s.base_referrer)'
                else:
                    referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
                                                               is_not=is_not, value_key=f_k)
            elif filter_type == schemas.FilterType.METADATA:
                # fetch the metadata list only when it is actually needed
                if meta_keys is None:
                    meta_keys = metadata.get(project_id=project.project_id)
                    meta_keys = {m["key"]: m["index"] for m in meta_keys}
                if f.source in meta_keys.keys():
                    if is_any:
                        ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
                    elif is_undefined:
                        ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
                    else:
                        ch_sessions_sub_query.append(
                            _multiple_conditions(
                                f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
                                f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.user_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
                                             value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
                                 schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
                                             is_not=is_not,
                                             value_key=f_k))

            elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
                if is_any:
                    ch_sessions_sub_query.append('isNotNull(s.rev_id)')
                elif is_undefined:
                    ch_sessions_sub_query.append('isNull(s.rev_id)')
                else:
                    ch_sessions_sub_query.append(
                        _multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
                                             value_key=f_k))

            elif filter_type == schemas.FilterType.PLATFORM:
                # op = __get_sql_operator(f.operator)
                ch_sessions_sub_query.append(
                    _multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
                                         value_key=f_k))
            # elif filter_type == schemas.FilterType.issue:
            #     if is_any:
            #         ch_sessions_sub_query.append("notEmpty(s.issue_types)")
            #     else:
            #         ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
            #         # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
            #         #                      value_key=f_k))
            #
            #     if is_not:
            #         extra_constraints[-1] = f"not({extra_constraints[-1]})"
            #         ss_constraints[-1] = f"not({ss_constraints[-1]})"
            elif filter_type == schemas.FilterType.EVENTS_COUNT:
                ch_sessions_sub_query.append(
                    _multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
                                         value_key=f_k))

    with ch_client.ClickHouseClient() as ch:
        step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
        sort = __get_sort_key('datetime')
        if data.sort is not None:
            sort = __get_sort_key(data.sort)
        order = "DESC"
        if data.order is not None:
            order = data.order
        params = {
            **params,
            "startDate": data.startTimestamp,
            "endDate": data.endTimestamp,
            "project_id": project.project_id,
            "userId": user_id,
            "step_size": step_size}
        if data.limit is not None and data.page is not None:
            params["errors_offset"] = (data.page - 1) * data.limit
            params["errors_limit"] = data.limit
        else:
            params["errors_offset"] = 0
            params["errors_limit"] = 200
        # if data.bookmarked:
        #     cur.execute(cur.mogrify(f"""SELECT error_id
        #                                 FROM public.user_favorite_errors
        #                                 WHERE user_id = %(userId)s
        #                                 {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
        #                             {"userId": user_id, "error_ids": tuple(error_ids or [])}))
        #     error_ids = cur.fetchall()
        #     if len(error_ids) == 0:
        #         return empty_response
        #     error_ids = [e["error_id"] for e in error_ids]

        if error_ids is not None:
            params["error_ids"] = tuple(error_ids)
            ch_sub_query.append("error_id IN %(error_ids)s")

        main_ch_query = f"""\
        SELECT details.error_id AS error_id,
               name, message, users, total,
               sessions, last_occurrence, first_occurrence, chart
        FROM (SELECT error_id,
                     JSONExtractString(toString(`$properties`), 'name')    AS name,
                     JSONExtractString(toString(`$properties`), 'message') AS message,
                     COUNT(DISTINCT user_id)                               AS users,
                     COUNT(DISTINCT events.session_id)                     AS sessions,
                     MAX(created_at)                                       AS max_datetime,
                     MIN(created_at)                                       AS min_datetime,
                     COUNT(DISTINCT error_id) OVER ()                      AS total
              FROM {MAIN_EVENTS_TABLE} AS events
                       INNER JOIN (SELECT session_id, coalesce(user_id, toString(user_uuid)) AS user_id
                                   FROM {MAIN_SESSIONS_TABLE} AS s
                                   {subquery_part}
                                   WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
                                  ON (events.session_id = sessions.session_id)
              WHERE {" AND ".join(ch_sub_query)}
              GROUP BY error_id, name, message
              ORDER BY {sort} {order}
              LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
                 INNER JOIN (SELECT error_id,
                                    toUnixTimestamp(MAX(created_at)) * 1000 AS last_occurrence,
                                    toUnixTimestamp(MIN(created_at)) * 1000 AS first_occurrence
                             FROM {MAIN_EVENTS_TABLE}
                             WHERE project_id = %(project_id)s
                               AND `$event_name` = 'ERROR'
                             GROUP BY error_id) AS time_details
                            ON details.error_id = time_details.error_id
                 INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
                             FROM (SELECT error_id,
                                          gs.generate_series         AS timestamp,
                                          COUNT(DISTINCT session_id) AS count
                                   FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
                                            LEFT JOIN {MAIN_EVENTS_TABLE} ON (TRUE)
                                   WHERE {" AND ".join(ch_sub_query)}
                                     AND created_at >= toDateTime(timestamp / 1000)
                                     AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
                                   GROUP BY error_id, timestamp
                                   ORDER BY timestamp) AS sub_table
                             GROUP BY error_id) AS chart_details ON details.error_id = chart_details.error_id;"""
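        # Editor's note — an illustrative sketch, not part of the original file: ClickHouse
        # returns the "chart" column built by groupArray([timestamp, count]) as an array of
        # two-element arrays, e.g. [[1700000000000, 3], [1700000060000, 0], ...]; the loop
        # after execution reshapes each pair into {"timestamp": ..., "count": ...}.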
        # print("------------")
        # print(ch.format(main_ch_query, params))
        # print("------------")
        query = ch.format(query=main_ch_query, parameters=params)

        rows = ch.execute(query=query)
        total = rows[0]["total"] if len(rows) > 0 else 0

        for r in rows:
            r["chart"] = list(r["chart"])
            for i in range(len(r["chart"])):
                r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}

    return {
        'total': total,
        'errors': helper.list_to_camel_case(rows)
    }


def get_trace(project_id, error_id):
    return errors_legacy.get_trace(project_id=project_id, error_id=error_id)


def get_sessions(start_date, end_date, project_id, user_id, error_id):
    return errors_legacy.get_sessions(start_date=start_date,
                                      end_date=end_date,
                                      project_id=project_id,
                                      user_id=user_id,
                                      error_id=error_id)
api/chalicelib/core/errors/errors_details.py (new file, 248 lines)
@@ -0,0 +1,248 @@
from chalicelib.core.errors.modules import errors_helper

from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size


def __flatten_sort_key_count_version(data, merge_nested=False):
    if data is None:
        return []
    return sorted(
        [
            {
                "name": f'{o["name"]}@{v["version"]}',
                "count": v["count"]
            } for o in data for v in o["partition"]
        ],
        key=lambda o: o["count"], reverse=True) if merge_nested else \
        [
            {
                "name": o["name"],
                "count": o["count"],
            } for o in data
        ]
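# Editor's note — an illustrative sketch, not part of the original file, assuming one
# browser entry with two version partitions:
#   data = [{"name": "Chrome", "count": 10,
#            "partition": [{"version": "120", "count": 7}, {"version": "119", "count": 3}]}]
#   __flatten_sort_key_count_version(data)                     # -> [{"name": "Chrome", "count": 10}]
#   __flatten_sort_key_count_version(data, merge_nested=True)  # -> [{"name": "Chrome@120", "count": 7},
#                                                               #     {"name": "Chrome@119", "count": 3}]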
def __process_tags(row):
    return [
        {"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
        {"name": "browser.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
        {"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
        {"name": "OS.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
        {"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
        {"name": "device",
         "partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
        {"name": "country", "partitions": row.pop("country_partition")}
    ]


def get_details(project_id, error_id, user_id, **data):
    pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
                                                           step_size_name="step_size24")
    pg_sub_query24.append("error_id = %(error_id)s")
    pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
                                                                   startTime_arg_name="startDate30",
                                                                   endTime_arg_name="endDate30",
                                                                   project_key="sessions.project_id")
    pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_session.append("error_id = %(error_id)s")
    pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
                                                               startTime_arg_name="startDate30",
                                                               endTime_arg_name="endDate30",
                                                               project_key="errors.project_id")
    pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
    pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
    pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
    pg_sub_query30_err.append("error_id = %(error_id)s")
    pg_sub_query30_err.append("source = 'js_exception'")
    pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
                                                           step_size_name="step_size30")
    pg_sub_query30.append("error_id = %(error_id)s")
    pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
    pg_basic_query.append("error_id = %(error_id)s")
    with pg_client.PostgresClient() as cur:
        data["startDate24"] = TimeUTC.now(-1)
        data["endDate24"] = TimeUTC.now()
        data["startDate30"] = TimeUTC.now(-30)
        data["endDate30"] = TimeUTC.now()
        density24 = int(data.get("density24", 24))
        step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
        density30 = int(data.get("density30", 30))
        step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
        params = {
            "startDate24": data['startDate24'],
            "endDate24": data['endDate24'],
            "startDate30": data['startDate30'],
            "endDate30": data['endDate30'],
            "project_id": project_id,
            "userId": user_id,
            "step_size24": step_size24,
            "step_size30": step_size30,
            "error_id": error_id}

        main_pg_query = f"""\
        SELECT error_id,
               name,
               message,
               users,
               sessions,
               last_occurrence,
               first_occurrence,
               last_session_id,
               browsers_partition,
               os_partition,
               device_partition,
               country_partition,
               chart24,
               chart30
        FROM (SELECT error_id,
                     name,
                     message,
                     COUNT(DISTINCT user_id)    AS users,
                     COUNT(DISTINCT session_id) AS sessions
              FROM public.errors
                       INNER JOIN events.errors AS s_errors USING (error_id)
                       INNER JOIN public.sessions USING (session_id)
              WHERE {" AND ".join(pg_sub_query30_err)}
              GROUP BY error_id, name, message) AS details
                 INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
                                    MIN(timestamp) AS first_occurrence
                             FROM events.errors
                             WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
                 INNER JOIN (SELECT session_id AS last_session_id
                             FROM events.errors
                             WHERE error_id = %(error_id)s
                             ORDER BY errors.timestamp DESC
                             LIMIT 1) AS last_session_details ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
                             FROM (SELECT *
                                   FROM (SELECT user_browser      AS name,
                                                COUNT(session_id) AS count
                                         FROM events.errors
                                                  INNER JOIN sessions USING (session_id)
                                         WHERE {" AND ".join(pg_sub_query30_session)}
                                         GROUP BY user_browser
                                         ORDER BY count DESC) AS count_per_browser_query
                                            INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
                                                                FROM (SELECT user_browser_version AS version,
                                                                             COUNT(session_id)    AS count
                                                                      FROM events.errors
                                                                               INNER JOIN public.sessions USING (session_id)
                                                                      WHERE {" AND ".join(pg_sub_query30_session)}
                                                                        AND sessions.user_browser = count_per_browser_query.name
                                                                      GROUP BY user_browser_version
                                                                      ORDER BY count DESC) AS version_details
                                                               ) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
                             FROM (SELECT *
                                   FROM (SELECT user_os           AS name,
                                                COUNT(session_id) AS count
                                         FROM events.errors
                                                  INNER JOIN public.sessions USING (session_id)
                                         WHERE {" AND ".join(pg_sub_query30_session)}
                                         GROUP BY user_os
                                         ORDER BY count DESC) AS count_per_os_details
                                            INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
                                                                FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
                                                                             COUNT(session_id)                    AS count
                                                                      FROM events.errors
                                                                               INNER JOIN public.sessions USING (session_id)
                                                                      WHERE {" AND ".join(pg_sub_query30_session)}
                                                                        AND sessions.user_os = count_per_os_details.name
                                                                      GROUP BY user_os_version
                                                                      ORDER BY count DESC) AS count_per_version_details
                                                                GROUP BY count_per_os_details.name) AS os_version_details
                                                               ON (TRUE)) AS os_details) AS os_details ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
                             FROM (SELECT *
                                   FROM (SELECT user_device_type  AS name,
                                                COUNT(session_id) AS count
                                         FROM events.errors
                                                  INNER JOIN public.sessions USING (session_id)
                                         WHERE {" AND ".join(pg_sub_query30_session)}
                                         GROUP BY user_device_type
                                         ORDER BY count DESC) AS count_per_device_details
                                            INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
                                                                FROM (SELECT CASE
                                                                                 WHEN user_device = '' OR user_device ISNULL
                                                                                     THEN 'unknown'
                                                                                 ELSE user_device END AS version,
                                                                             COUNT(session_id)        AS count
                                                                      FROM events.errors
                                                                               INNER JOIN public.sessions USING (session_id)
                                                                      WHERE {" AND ".join(pg_sub_query30_session)}
                                                                        AND sessions.user_device_type = count_per_device_details.name
                                                                      GROUP BY user_device
                                                                      ORDER BY count DESC) AS count_per_device_v_details
                                                                GROUP BY count_per_device_details.name) AS device_version_details
                                                               ON (TRUE)) AS device_details) AS device_details ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
                             FROM (SELECT user_country      AS name,
                                          COUNT(session_id) AS count
                                   FROM events.errors
                                            INNER JOIN public.sessions USING (session_id)
                                   WHERE {" AND ".join(pg_sub_query30_session)}
                                   GROUP BY user_country
                                   ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
                             FROM (SELECT generated_timestamp AS timestamp,
                                          COUNT(session_id)   AS count
                                   FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
                                            LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                               FROM events.errors
                                                                        INNER JOIN public.sessions USING (session_id)
                                                               WHERE {" AND ".join(pg_sub_query24)}
                                                              ) AS chart_details ON (TRUE)
                                   GROUP BY generated_timestamp
                                   ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
                 INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
                             FROM (SELECT generated_timestamp AS timestamp,
                                          COUNT(session_id)   AS count
                                   FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
                                            LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                               FROM events.errors
                                                                        INNER JOIN public.sessions USING (session_id)
                                                               WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
                                                              ON (TRUE)
                                   GROUP BY timestamp
                                   ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
        """

        # print("--------------------")
        # print(cur.mogrify(main_pg_query, params))
        # print("--------------------")
        cur.execute(cur.mogrify(main_pg_query, params))
        row = cur.fetchone()
        if row is None:
            return {"errors": ["error not found"]}
        row["tags"] = __process_tags(row)

        query = cur.mogrify(
            f"""SELECT error_id, status, session_id, start_ts,
                       parent_error_id, session_id, user_anonymous_id,
                       user_id, user_uuid, user_browser, user_browser_version,
                       user_os, user_os_version, user_device, payload,
                       FALSE AS favorite,
                       TRUE  AS viewed
                FROM public.errors AS pe
                         INNER JOIN events.errors AS ee USING (error_id)
                         INNER JOIN public.sessions USING (session_id)
                WHERE pe.project_id = %(project_id)s
                  AND error_id = %(error_id)s
                ORDER BY start_ts DESC
                LIMIT 1;""",
            {"project_id": project_id, "error_id": error_id, "user_id": user_id})
        cur.execute(query=query)
        status = cur.fetchone()

    if status is not None:
        row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
        row["status"] = status.pop("status")
        row["parent_error_id"] = status.pop("parent_error_id")
        row["favorite"] = status.pop("favorite")
        row["viewed"] = status.pop("viewed")
        row["last_hydrated_session"] = status
    else:
        row["stack"] = []
        row["last_hydrated_session"] = None
        row["status"] = "untracked"
        row["parent_error_id"] = None
        row["favorite"] = False
        row["viewed"] = False
    return {"data": helper.dict_to_camel_case(row)}
api/chalicelib/core/errors/errors_pg.py (new file, 294 lines)
@@ -0,0 +1,294 @@
import json
from typing import List

import schemas
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.sessions import sessions_search
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size


def get(error_id, family=False) -> dict | List[dict]:
    if family:
        return get_batch([error_id])
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT *
               FROM public.errors
               WHERE error_id = %(error_id)s
               LIMIT 1;""",
            {"error_id": error_id})
        cur.execute(query=query)
        result = cur.fetchone()
        if result is not None:
            result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
    return helper.dict_to_camel_case(result)


def get_batch(error_ids):
    if len(error_ids) == 0:
        return []
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """
            WITH RECURSIVE error_family AS (
                SELECT *
                FROM public.errors
                WHERE error_id IN %(error_ids)s
                UNION
                SELECT child_errors.*
                FROM public.errors AS child_errors
                         INNER JOIN error_family
                                    ON error_family.error_id = child_errors.parent_error_id
                                        OR error_family.parent_error_id = child_errors.error_id
            )
            SELECT *
            FROM error_family;""",
            {"error_ids": tuple(error_ids)})
        cur.execute(query=query)
        errors = cur.fetchall()
        for e in errors:
            e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
    return helper.list_to_camel_case(errors)
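# Editor's note — an illustrative sketch, not part of the original file: the recursive
# CTE above walks parent/child links in both directions, so requesting any member of a
# family returns the whole family. E.g. with rows
#   error_id='B' parent_error_id='A', error_id='C' parent_error_id='A'
# get_batch(['B']) pulls in A (via B.parent_error_id) and then C (as another child of A);
# UNION deduplicates rows, so the recursion terminates.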
def __get_sort_key(key):
    return {
        schemas.ErrorSort.OCCURRENCE: "max_datetime",
        schemas.ErrorSort.USERS_COUNT: "users",
        schemas.ErrorSort.SESSIONS_COUNT: "sessions"
    }.get(key, 'max_datetime')


def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
    empty_response = {
        'total': 0,
        'errors': []
    }

    platform = None
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
            platform = f.value[0]
    pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
    pg_sub_query += ["sessions.start_ts >= %(startDate)s", "sessions.start_ts < %(endDate)s",
                     "source = 'js_exception'", "pe.project_id = %(project_id)s"]
    # To ignore "Script error."
    pg_sub_query.append("pe.message != 'Script error.'")
    pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
                                                               project_key=None)
    if platform:
        pg_sub_query_chart += ["start_ts >= %(startDate)s", "start_ts < %(endDate)s",
                               "project_id = %(project_id)s"]
    pg_sub_query_chart.append("errors.error_id = details.error_id")
    statuses = []
    error_ids = None
    if data.startTimestamp is None:
        data.startTimestamp = TimeUTC.now(-30)
    if data.endTimestamp is None:
        data.endTimestamp = TimeUTC.now(1)
    if len(data.events) > 0 or len(data.filters) > 0:
        print("-- searching for sessions before errors")
        statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
                                                   error_status=data.status)
        if len(statuses) == 0:
            return empty_response
        error_ids = [e["errorId"] for e in statuses]
    with pg_client.PostgresClient() as cur:
        step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
        sort = __get_sort_key('datetime')
        if data.sort is not None:
            sort = __get_sort_key(data.sort)
        order = schemas.SortOrderType.DESC
        if data.order is not None:
            order = data.order
        extra_join = ""

        params = {
            "startDate": data.startTimestamp,
            "endDate": data.endTimestamp,
            "project_id": project.project_id,
            "userId": user_id,
            "step_size": step_size}
        if data.status != schemas.ErrorStatus.ALL:
            pg_sub_query.append("status = %(error_status)s")
            params["error_status"] = data.status
        if data.limit is not None and data.page is not None:
            params["errors_offset"] = (data.page - 1) * data.limit
            params["errors_limit"] = data.limit
        else:
            params["errors_offset"] = 0
            params["errors_limit"] = 200

        if error_ids is not None:
            params["error_ids"] = tuple(error_ids)
            pg_sub_query.append("error_id IN %(error_ids)s")
        # if data.bookmarked:
        #     pg_sub_query.append("ufe.user_id = %(userId)s")
        #     extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
        if data.query is not None and len(data.query) > 0:
            pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
            params["error_query"] = helper.values_for_operator(value=data.query,
                                                               op=schemas.SearchEventOperator.CONTAINS)

        main_pg_query = f"""SELECT full_count,
                                   error_id,
                                   name,
                                   message,
                                   users,
                                   sessions,
                                   last_occurrence,
                                   first_occurrence,
                                   chart
                            FROM (SELECT COUNT(details) OVER () AS full_count, details.*
                                  FROM (SELECT error_id,
                                               name,
                                               message,
                                               COUNT(DISTINCT COALESCE(user_id, user_uuid::text)) AS users,
                                               COUNT(DISTINCT session_id)                         AS sessions,
                                               MAX(timestamp)                                     AS max_datetime,
                                               MIN(timestamp)                                     AS min_datetime
                                        FROM events.errors
                                                 INNER JOIN public.errors AS pe USING (error_id)
                                                 INNER JOIN public.sessions USING (session_id)
                                                 {extra_join}
                                        WHERE {" AND ".join(pg_sub_query)}
                                        GROUP BY error_id, name, message
                                        ORDER BY {sort} {order}) AS details
                                  LIMIT %(errors_limit)s OFFSET %(errors_offset)s
                                 ) AS details
                                INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
                                                           MIN(timestamp) AS first_occurrence
                                                    FROM events.errors
                                                    WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
                                INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
                                                    FROM (SELECT generated_timestamp AS timestamp,
                                                                 COUNT(session_id)   AS count
                                                          FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
                                                                   LEFT JOIN LATERAL (SELECT DISTINCT session_id
                                                                                      FROM events.errors
                                                                                      {"INNER JOIN public.sessions USING(session_id)" if platform else ""}
                                                                                      WHERE {" AND ".join(pg_sub_query_chart)}
                                                                                     ) AS sessions ON (TRUE)
                                                          GROUP BY timestamp
                                                          ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""

        # print("--------------------")
        # print(cur.mogrify(main_pg_query, params))
        # print("--------------------")

        cur.execute(cur.mogrify(main_pg_query, params))
        rows = cur.fetchall()
        total = 0 if len(rows) == 0 else rows[0]["full_count"]

        if total == 0:
            rows = []
        else:
            if len(statuses) == 0:
                query = cur.mogrify(
                    """SELECT error_id
                       FROM public.errors
                       WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
                    {"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
                     "user_id": user_id})
                cur.execute(query=query)
                statuses = helper.list_to_camel_case(cur.fetchall())
                statuses = {s["errorId"]: s for s in statuses}

        for r in rows:
            r.pop("full_count")

    return {
        'total': total,
        'errors': helper.list_to_camel_case(rows)
    }
def __save_stacktrace(error_id, data):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
               WHERE error_id = %(error_id)s;""",
            {"error_id": error_id, "data": json.dumps(data)})
        cur.execute(query=query)


def get_trace(project_id, error_id):
    error = get(error_id=error_id, family=False)
    if error is None:
        return {"errors": ["error not found"]}
    if error.get("source", "") != "js_exception":
        return {"errors": ["this source of errors doesn't have a sourcemap"]}
    if error.get("payload") is None:
        return {"errors": ["null payload"]}
    if error.get("stacktrace") is not None:
        return {"sourcemapUploaded": True,
                "trace": error.get("stacktrace"),
                "preparsed": True}
    trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
    if all_exists:
        __save_stacktrace(error_id=error_id, data=trace)
    return {"sourcemapUploaded": all_exists,
            "trace": trace,
            "preparsed": False}


def get_sessions(start_date, end_date, project_id, user_id, error_id):
    extra_constraints = ["s.project_id = %(project_id)s",
                         "s.start_ts >= %(startDate)s",
                         "s.start_ts <= %(endDate)s",
                         "e.error_id = %(error_id)s"]
    if start_date is None:
        start_date = TimeUTC.now(-7)
    if end_date is None:
        end_date = TimeUTC.now()

    params = {
        "startDate": start_date,
        "endDate": end_date,
        "project_id": project_id,
        "userId": user_id,
        "error_id": error_id}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT s.project_id,
                       s.session_id::text AS session_id,
                       s.user_uuid,
                       s.user_id,
                       s.user_agent,
                       s.user_os,
                       s.user_browser,
                       s.user_device,
                       s.user_country,
                       s.start_ts,
                       s.duration,
                       s.events_count,
                       s.pages_count,
                       s.errors_count,
                       s.issue_types,
                       COALESCE((SELECT TRUE
                                 FROM public.user_favorite_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
                       COALESCE((SELECT TRUE
                                 FROM public.user_viewed_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
                FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
                WHERE {" AND ".join(extra_constraints)}
                ORDER BY s.start_ts DESC;""",
            params)
        cur.execute(query=query)
        sessions_list = []
        total = cur.rowcount
        row = cur.fetchone()
        while row is not None and len(sessions_list) < 100:
            sessions_list.append(row)
            row = cur.fetchone()

    return {
        'total': total,
        'sessions': helper.list_to_camel_case(sessions_list)
    }
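# Editor's note — a clarifying sketch, not part of the original file: cur.rowcount is
# read before the fetch loop, so 'total' reflects every matching session while the
# 'sessions' list is capped at the first 100 rows,
# e.g. {"total": 250, "sessions": [...100 items...]}.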
api/chalicelib/core/errors/modules/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    import chalicelib.core.sessions.sessions_ch as sessions
else:
    import chalicelib.core.sessions.sessions_pg as sessions
api/chalicelib/core/errors/modules/helper.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from typing import Optional

import schemas
from chalicelib.core.sourcemaps import sourcemaps


def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
                            chart: bool = False, step_size_name: str = "step_size",
                            project_key: Optional[str] = "project_id"):
    if project_key is None:
        ch_sub_query = []
    else:
        ch_sub_query = [f"{project_key} = %(project_id)s"]
    if time_constraint:
        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
                         f"timestamp < %({endTime_arg_name})s"]
    if chart:
        ch_sub_query += [f"timestamp >= generated_timestamp",
                         f"timestamp < generated_timestamp + %({step_size_name})s"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query
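# Editor's note — an illustrative sketch, not part of the original file: with chart=True
# and time_constraint=False (how the error-details charts call it), the function returns
#   ["project_id = %(project_id)s",
#    "timestamp >= generated_timestamp",
#    "timestamp < generated_timestamp + %(step_size)s"]
# i.e. the per-bucket bounds that pair with a generate_series() timeline.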
def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
                               endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                               table_name=None):
    ch_sub_query = [f"{project_key} = toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
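# Editor's note — a brief sketch, not part of the original file: format_first_stack_frame
# keeps only the first stack frame (truncate_to_first=True), trims any context string
# beyond 1000 characters, and decodes a bytes filename, so a frame like
#   {"filename": b"app.js", "context": [["<very long line>"]]}
# comes back with "filename": "app.js" and the context line capped at 1000 chars.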
(deleted file — name not preserved in this extract)
@@ -1,48 +0,0 @@
from chalicelib.utils import pg_client


def add_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""INSERT INTO public.user_favorite_errors(user_id, error_id)
                           VALUES (%(userId)s, %(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": True}


def remove_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""DELETE FROM public.user_favorite_errors
                           WHERE user_id = %(userId)s
                             AND error_id = %(error_id)s;""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": False}


def favorite_error(project_id, user_id, error_id):
    exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
    if not exists:
        return {"errors": ["cannot bookmark non-rehydrated errors"]}
    if favorite:
        return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
    return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)


def error_exists_and_favorite(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT errors.error_id AS exists, ufe.error_id AS favorite
                   FROM public.errors
                            LEFT JOIN (SELECT error_id
                                       FROM public.user_favorite_errors
                                       WHERE user_id = %(userId)s) AS ufe USING (error_id)
                   WHERE error_id = %(error_id)s;""",
                {"userId": user_id, "error_id": error_id})
        )
        r = cur.fetchone()
    if r is None:
        return False, False
    return True, r.get("favorite") is not None
(deleted file — name not preserved in this extract)
@@ -1,37 +0,0 @@
from chalicelib.utils import pg_client


def add_viewed_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
                           VALUES (%(userId)s, %(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )


def viewed_error_exists(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT errors.error_id AS hydrated,
                      COALESCE((SELECT TRUE
                                FROM public.user_viewed_errors AS ve
                                WHERE ve.error_id = %(error_id)s
                                  AND ve.user_id = %(userId)s
                                LIMIT 1), FALSE) AS viewed
               FROM public.errors
               WHERE error_id = %(error_id)s""",
            {"userId": user_id, "error_id": error_id})
        cur.execute(query=query)
        r = cur.fetchone()
    if r:
        return r.get("viewed")
    return True


def viewed_error(project_id, user_id, error_id):
    if viewed_error_exists(user_id=user_id, error_id=error_id):
        return None
    return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
@@ -1,9 +1,10 @@
+from functools import cache
 from typing import Optional
 
 import schemas
-from chalicelib.core import autocomplete
 from chalicelib.core import issues
-from chalicelib.core import sessions_metas
+from chalicelib.core.autocomplete import autocomplete
+from chalicelib.core.sessions import sessions_metas
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
 from chalicelib.utils.event_filter_definition import SupportedFilter, Event
@@ -102,7 +103,7 @@ def _search_tags(project_id, value, key=None, source=None):
     with pg_client.PostgresClient() as cur:
         query = f"""
                 SELECT public.tags.name,
-                       '{events.EventType.TAG.ui_type}' AS type
+                       'TAG' AS type
                 FROM public.tags
                 WHERE public.tags.project_id = %(project_id)s
                 ORDER BY SIMILARITY(public.tags.name, %(value)s) DESC
|
@@ -137,52 +138,57 @@ class EventType:
                                 column=None)  # column=None because errors are searched by name or message

SUPPORTED_TYPES = {
    EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
                                             query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
    EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
                                             query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
    EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
                                                query=autocomplete.__generic_query(
                                                    typename=EventType.LOCATION.ui_type)),
    EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
                                              query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
    EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
                                               query=autocomplete.__generic_query(
                                                   typename=EventType.REQUEST.ui_type)),
    EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
                                               query=autocomplete.__generic_query(
                                                   typename=EventType.GRAPHQL.ui_type)),
    EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
                                                   query=autocomplete.__generic_query(
                                                       typename=EventType.STATEACTION.ui_type)),
    EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
    EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
                                             query=None),
    EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
                                                query=None),
    # MOBILE
    EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.CLICK_MOBILE.ui_type)),
    EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.SWIPE_MOBILE.ui_type)),
    EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.INPUT_MOBILE.ui_type)),
    EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
                                                   query=autocomplete.__generic_query(
                                                       typename=EventType.VIEW_MOBILE.ui_type)),
    EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
                                                     query=autocomplete.__generic_query(
                                                         typename=EventType.CUSTOM_MOBILE.ui_type)),
    EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
                                                      query=autocomplete.__generic_query(
                                                          typename=EventType.REQUEST_MOBILE.ui_type)),
    EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
                                                    query=None),
}

@cache
def supported_types():
    return {
        EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
                                                 query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
        EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
                                                 query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
        EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.LOCATION.ui_type)),
        EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
                                                  query=autocomplete.__generic_query(
                                                      typename=EventType.CUSTOM.ui_type)),
        EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
                                                   query=autocomplete.__generic_query(
                                                       typename=EventType.REQUEST.ui_type)),
        EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
                                                   query=autocomplete.__generic_query(
                                                       typename=EventType.GRAPHQL.ui_type)),
        EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
                                                       query=autocomplete.__generic_query(
                                                           typename=EventType.STATEACTION.ui_type)),
        EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
        EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
                                                 query=None),
        EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
                                                    query=None),
        # MOBILE
        EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
                                                        query=autocomplete.__generic_query(
                                                            typename=EventType.CLICK_MOBILE.ui_type)),
        EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
                                                        query=autocomplete.__generic_query(
                                                            typename=EventType.SWIPE_MOBILE.ui_type)),
        EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
                                                        query=autocomplete.__generic_query(
                                                            typename=EventType.INPUT_MOBILE.ui_type)),
        EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
                                                       query=autocomplete.__generic_query(
                                                           typename=EventType.VIEW_MOBILE.ui_type)),
        EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
            get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
            query=autocomplete.__generic_query(
                typename=EventType.CUSTOM_MOBILE.ui_type)),
        EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
            get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
            query=autocomplete.__generic_query(
                typename=EventType.REQUEST_MOBILE.ui_type)),
        EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
                                                        query=None),
    }


def get_errors_by_session_id(session_id, project_id):
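Note on the change above: the module-level SUPPORTED_TYPES dict becomes a supported_types() function memoized with @cache, so the mapping is built on first use instead of at import time. A minimal sketch of the same pattern (assumes functools.cache, Python 3.9+; the names here are illustrative, not from this diff):

from functools import cache

@cache
def supported_types() -> dict:
    # Built lazily on the first call; because the function takes no
    # arguments, the cache holds a single entry and every later call
    # returns the same dict object.
    return {"CLICK": "autocomplete-handler", "INPUT": "autocomplete-handler"}

a = supported_types()
b = supported_types()
assert a is b  # same object: the body ran exactly once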
@@ -202,20 +208,17 @@ def search(text, event_type, project_id, source, key):
    if not event_type:
        return {"data": autocomplete.__get_autocomplete_table(text, project_id)}

    if event_type in SUPPORTED_TYPES.keys():
        rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
    # for IOS events autocomplete
    # if event_type + "_IOS" in SUPPORTED_TYPES.keys():
    #     rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
        rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
    if event_type in supported_types().keys():
        rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type + "_MOBILE" in supported_types().keys():
        rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
    elif event_type in sessions_metas.supported_types().keys():
        return sessions_metas.search(text, event_type, project_id)
    elif event_type.endswith("_IOS") \
            and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
            and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
        return sessions_metas.search(text, event_type, project_id)
    elif event_type.endswith("_MOBILE") \
            and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
            and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
        return sessions_metas.search(text, event_type, project_id)
    else:
        return {"errors": ["unsupported event"]}
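The lookup order above (exact ui_type, then the same type with a "_MOBILE" suffix, then the session-metadata types) is a small fallback chain. A self-contained sketch of the idea, with purely illustrative names:

def resolve(handlers: dict, event_type: str):
    # Exact match first, then the mobile variant of the same type.
    for key in (event_type, event_type + "_MOBILE"):
        if key in handlers:
            return handlers[key]
    return None  # caller reports "unsupported event"

handlers = {"CLICK": "web-click-handler", "INPUT_MOBILE": "mobile-input-handler"}
assert resolve(handlers, "CLICK") == "web-click-handler"
assert resolve(handlers, "INPUT") == "mobile-input-handler"
assert resolve(handlers, "SWIPE") is None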
@@ -1,11 +1,14 @@
import json
import logging
from typing import Any, List, Dict, Optional

import schemas
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from typing import Any, List, Dict, Optional
from fastapi import HTTPException, status
import json
import logging

logger = logging.getLogger(__name__)

feature_flag_columns = (
    "feature_flag_id",
@@ -299,7 +302,8 @@ def create_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlag

    with pg_client.PostgresClient() as cur:
        params = [
            (feature_flag_id, c.name, c.rollout_percentage, json.dumps([filter_.model_dump() for filter_ in c.filters]))
            (feature_flag_id, c.name, c.rollout_percentage,
             json.dumps([filter_.model_dump() for filter_ in c.filters]))
            for c in conditions]
        query = cur.mogrify(sql, params)
        cur.execute(query)

@@ -455,7 +459,8 @@ def create_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVari
    """

    with pg_client.PostgresClient() as cur:
        params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in variants]
        params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in
                  variants]
        query = cur.mogrify(sql, params)
        cur.execute(query)
        rows = cur.fetchall()
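Both hunks above are pure re-wraps of the params list comprehension: one tuple per row, with nested fields JSON-encoded before insertion. The underlying pattern looks like this in plain psycopg2 (a sketch only; the table and column names are made up, not from this diff):

import json
import psycopg2

def insert_conditions(conn, feature_flag_id: int, conditions: list[dict]) -> None:
    rows = [
        (feature_flag_id, c["name"], c["rollout_percentage"], json.dumps(c["filters"]))
        for c in conditions
    ]
    with conn.cursor() as cur:
        # executemany runs the INSERT once per tuple in rows
        cur.executemany(
            "INSERT INTO feature_flags_conditions(feature_flag_id, name, rollout_percentage, filters) "
            "VALUES (%s, %s, %s, %s)",
            rows,
        )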
@@ -1,4 +1,4 @@
from urllib.parse import urlparse
import logging

import redis
import requests

@@ -7,11 +7,13 @@ from decouple import config
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)


def app_connection_string(name, port, path):
    namespace = config("POD_NAMESPACE", default="app")
    conn_string = config("CLUSTER_URL", default="svc.cluster.local")
    return f"http://{'.'.join(filter(None, [name, namespace, conn_string]))}:{port}/{path}"
    return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"


HEALTH_ENDPOINTS = {

@@ -25,7 +27,6 @@ HEALTH_ENDPOINTS = {
    "http": app_connection_string("http-openreplay", 8888, "metrics"),
    "ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
    "integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
    "peers": app_connection_string("peers-openreplay", 8888, "health"),
    "sink": app_connection_string("sink-openreplay", 8888, "metrics"),
    "sourcemapreader": app_connection_string(
        "sourcemapreader-openreplay", 8888, "health"
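The two return lines in app_connection_string are the before/after of the same helper: the join/filter version silently drops empty components, while the plain f-string keeps the dots. A quick standalone illustration:

def url_filtered(name, namespace, conn, port, path):
    return f"http://{'.'.join(filter(None, [name, namespace, conn]))}:{port}/{path}"

def url_plain(name, namespace, conn, port, path):
    return f"http://{name}.{namespace}.{conn}:{port}/{path}"

# With an empty namespace, only the filtered variant stays well-formed:
print(url_filtered("sink-openreplay", "", "svc.cluster.local", 8888, "metrics"))
# http://sink-openreplay.svc.cluster.local:8888/metrics
print(url_plain("sink-openreplay", "", "svc.cluster.local", 8888, "metrics"))
# http://sink-openreplay..svc.cluster.local:8888/metrics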
@@ -37,78 +38,66 @@ HEALTH_ENDPOINTS = {
def __check_database_pg(*_):
    fail_response = {
        "health": False,
        "details": {
            "errors": ["Postgres health-check failed"]
        }
        "details": {"errors": ["Postgres health-check failed"]},
    }
    with pg_client.PostgresClient() as cur:
        try:
            cur.execute("SHOW server_version;")
            server_version = cur.fetchone()
            # server_version = cur.fetchone()
        except Exception as e:
            print("!! health failed: postgres not responding")
            print(str(e))
            logger.error("!! health failed: postgres not responding")
            logger.exception(e)
            return fail_response
        try:
            cur.execute("SELECT openreplay_version() AS version;")
            schema_version = cur.fetchone()
            # schema_version = cur.fetchone()
        except Exception as e:
            print("!! health failed: openreplay_version not defined")
            print(str(e))
            logger.error("!! health failed: openreplay_version not defined")
            logger.exception(e)
            return fail_response
    return {
        "health": True,
        "details": {
            # "version": server_version["server_version"],
            # "schema": schema_version["version"]
        }
        },
    }


def __not_supported(*_):
    return {"errors": ["not supported"]}


def __always_healthy(*_):
    return {
        "health": True,
        "details": {}
    }
    return {"health": True, "details": {}}


def __check_be_service(service_name):
    def fn(*_):
        fail_response = {
            "health": False,
            "details": {
                "errors": ["server health-check failed"]
            }
            "details": {"errors": ["server health-check failed"]},
        }
        try:
            results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
            if results.status_code != 200:
                print(f"!! issue with the {service_name}-health code:{results.status_code}")
                print(results.text)
                logger.error(
                    f"!! issue with the {service_name}-health code:{results.status_code}"
                )
                logger.error(results.text)
                # fail_response["details"]["errors"].append(results.text)
                return fail_response
        except requests.exceptions.Timeout:
            print(f"!! Timeout getting {service_name}-health")
            logger.error(f"!! Timeout getting {service_name}-health")
            # fail_response["details"]["errors"].append("timeout")
            return fail_response
        except Exception as e:
            print(f"!! Issue getting {service_name}-health response")
            print(str(e))
            logger.error(f"!! Issue getting {service_name}-health response")
            logger.exception(e)
            try:
                print(results.text)
                logger.error(results.text)
                # fail_response["details"]["errors"].append(results.text)
            except Exception:
                print("couldn't get response")
                logger.error("couldn't get response")
                # fail_response["details"]["errors"].append(str(e))
            return fail_response
        return {
            "health": True,
            "details": {}
        }
        return {"health": True, "details": {}}

    return fn
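__check_be_service is a closure factory: it captures service_name and returns a zero-argument checker, so the health map can hold ready-to-call functions. A minimal standalone sketch of that shape (illustrative names):

def make_checker(service_name: str):
    def fn(*_):
        # service_name is captured from the enclosing call
        return {"service": service_name, "health": True}
    return fn

checks = {"alerts": make_checker("alerts"), "sink": make_checker("sink")}
print(checks["sink"]())  # {'service': 'sink', 'health': True}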
@@ -116,18 +105,18 @@ def __check_be_service(service_name):
def __check_redis(*_):
    fail_response = {
        "health": False,
        "details": {"errors": ["server health-check failed"]}
        "details": {"errors": ["server health-check failed"]},
    }
    if config("REDIS_STRING", default=None) is None:
        # fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
        return fail_response

    try:
        r = redis.from_url(config("REDIS_STRING"))
        r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
        r.ping()
    except Exception as e:
        print("!! Issue getting redis-health response")
        print(str(e))
        logger.error("!! Issue getting redis-health response")
        logger.exception(e)
        # fail_response["details"]["errors"].append(str(e))
        return fail_response
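The added socket_timeout=2 keeps a hung Redis from blocking the health endpoint: redis-py raises redis.exceptions.TimeoutError once a socket read or write exceeds the limit, instead of waiting indefinitely. A short sketch of the behavior (assumes a local Redis URL):

import redis

# socket_timeout bounds each socket operation; stalled connections
# raise TimeoutError instead of hanging the caller.
r = redis.from_url("redis://localhost:6379/0", socket_timeout=2)
try:
    r.ping()
except redis.exceptions.TimeoutError:
    print("redis did not answer within 2s")
except redis.exceptions.ConnectionError:
    print("redis unreachable")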
@@ -135,53 +124,43 @@ def __check_redis(*_):
        "health": True,
        "details": {
            # "version": r.execute_command('INFO')['redis_version']
        }
        },
    }


def __check_SSL(*_):
    fail_response = {
        "health": False,
        "details": {
            "errors": ["SSL Certificate health-check failed"]
        }
        "details": {"errors": ["SSL Certificate health-check failed"]},
    }
    try:
        requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
    except Exception as e:
        print("!! health failed: SSL Certificate")
        print(str(e))
        logger.error("!! health failed: SSL Certificate")
        logger.exception(e)
        return fail_response
    return {
        "health": True,
        "details": {}
    }
    return {"health": True, "details": {}}


def __get_sessions_stats(*_):
    with pg_client.PostgresClient() as cur:
        constraints = ["projects.deleted_at IS NULL"]
        query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
        query = cur.mogrify(
            f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
                       COALESCE(SUM(events_count),0) AS e_c
                FROM public.projects_stats
                         INNER JOIN public.projects USING(project_id)
                WHERE {" AND ".join(constraints)};""")
                WHERE {" AND ".join(constraints)};"""
        )
        cur.execute(query)
        row = cur.fetchone()
        return {
            "numberOfSessionsCaptured": row["s_c"],
            "numberOfEventCaptured": row["e_c"]
        }
        return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}


def get_health(tenant_id=None):
    health_map = {
        "databases": {
            "postgres": __check_database_pg
        },
        "ingestionPipeline": {
            "redis": __check_redis
        },
        "databases": {"postgres": __check_database_pg},
        "ingestionPipeline": {"redis": __check_redis},
        "backendServices": {
            "alerts": __check_be_service("alerts"),
            "assets": __check_be_service("assets"),
@@ -194,13 +173,12 @@ def get_health(tenant_id=None):
            "http": __check_be_service("http"),
            "ingress-nginx": __always_healthy,
            "integrations": __check_be_service("integrations"),
            "peers": __check_be_service("peers"),
            "sink": __check_be_service("sink"),
            "sourcemapreader": __check_be_service("sourcemapreader"),
            "storage": __check_be_service("storage")
            "storage": __check_be_service("storage"),
        },
        "details": __get_sessions_stats,
        "ssl": __check_SSL
        "ssl": __check_SSL,
    }
    return __process_health(health_map=health_map)
@@ -212,10 +190,16 @@ def __process_health(health_map):
            response.pop(parent_key)
        elif isinstance(health_map[parent_key], dict):
            for element_key in health_map[parent_key]:
                if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
                if config(
                        f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
                        cast=bool,
                        default=False,
                ):
                    response[parent_key].pop(element_key)
                else:
                    response[parent_key][element_key] = health_map[parent_key][element_key]()
                    response[parent_key][element_key] = health_map[parent_key][
                        element_key
                    ]()
        else:
            response[parent_key] = health_map[parent_key]()
    return response
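Each check can be skipped with an env var of the form SKIP_H_<GROUP>_<NAME>; with python-decouple, cast=bool turns strings such as "true" or "1" into booleans. Illustrative usage (the variable name follows the pattern above and is not a documented flag):

# e.g. in the environment of the API pod:
#   SKIP_H_BACKENDSERVICES_ALERTS=true
from decouple import config

if config("SKIP_H_BACKENDSERVICES_ALERTS", cast=bool, default=False):
    print("alerts health-check disabled")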
@@ -223,7 +207,8 @@ def __process_health(health_map):

def cron():
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify("""SELECT projects.project_id,
        query = cur.mogrify(
            """SELECT projects.project_id,
                      projects.created_at,
                      projects.sessions_last_check_at,
                      projects.first_recorded_session_at,

@@ -231,7 +216,8 @@ def cron():
               FROM public.projects
                        LEFT JOIN public.projects_stats USING (project_id)
               WHERE projects.deleted_at IS NULL
               ORDER BY project_id;""")
               ORDER BY project_id;"""
        )
        cur.execute(query)
        rows = cur.fetchall()
        for r in rows:
@@ -252,20 +238,24 @@ def cron():
                count_start_from = r["last_update_at"]

            count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
            params = {"project_id": r["project_id"],
                      "start_ts": count_start_from,
                      "end_ts": TimeUTC.now(),
                      "sessions_count": 0,
                      "events_count": 0}
            params = {
                "project_id": r["project_id"],
                "start_ts": count_start_from,
                "end_ts": TimeUTC.now(),
                "sessions_count": 0,
                "events_count": 0,
            }

            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
            query = cur.mogrify(
                """SELECT COUNT(1) AS sessions_count,
                          COALESCE(SUM(events_count),0) AS events_count
                   FROM public.sessions
                   WHERE project_id=%(project_id)s
                     AND start_ts>=%(start_ts)s
                     AND start_ts<=%(end_ts)s
                     AND duration IS NOT NULL;""",
                                params)
                params,
            )
            cur.execute(query)
            row = cur.fetchone()
            if row is not None:
@@ -273,56 +263,68 @@ def cron():
                params["events_count"] = row["events_count"]

                if insert:
                    query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                    query = cur.mogrify(
                        """INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                           VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
                                        params)
                        params,
                    )
                else:
                    query = cur.mogrify("""UPDATE public.projects_stats
                    query = cur.mogrify(
                        """UPDATE public.projects_stats
                           SET sessions_count=sessions_count+%(sessions_count)s,
                               events_count=events_count+%(events_count)s,
                               last_update_at=(now() AT TIME ZONE 'utc'::text)
                           WHERE project_id=%(project_id)s;""",
                                        params)
                        params,
                    )
                cur.execute(query)


# this cron is used to correct the sessions&events count every week
def weekly_cron():
    with pg_client.PostgresClient(long_query=True) as cur:
        query = cur.mogrify("""SELECT project_id,
        query = cur.mogrify(
            """SELECT project_id,
                      projects_stats.last_update_at
               FROM public.projects
                        LEFT JOIN public.projects_stats USING (project_id)
               WHERE projects.deleted_at IS NULL
               ORDER BY project_id;""")
               ORDER BY project_id;"""
        )
        cur.execute(query)
        rows = cur.fetchall()
        for r in rows:
            if r["last_update_at"] is None:
                continue

            params = {"project_id": r["project_id"],
                      "end_ts": TimeUTC.now(),
                      "sessions_count": 0,
                      "events_count": 0}
            params = {
                "project_id": r["project_id"],
                "end_ts": TimeUTC.now(),
                "sessions_count": 0,
                "events_count": 0,
            }

            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
            query = cur.mogrify(
                """SELECT COUNT(1) AS sessions_count,
                          COALESCE(SUM(events_count),0) AS events_count
                   FROM public.sessions
                   WHERE project_id=%(project_id)s
                     AND start_ts<=%(end_ts)s
                     AND duration IS NOT NULL;""",
                                params)
                params,
            )
            cur.execute(query)
            row = cur.fetchone()
            if row is not None:
                params["sessions_count"] = row["sessions_count"]
                params["events_count"] = row["events_count"]

                query = cur.mogrify("""UPDATE public.projects_stats
                query = cur.mogrify(
                    """UPDATE public.projects_stats
                       SET sessions_count=%(sessions_count)s,
                           events_count=%(events_count)s,
                           last_update_at=(now() AT TIME ZONE 'utc'::text)
                       WHERE project_id=%(project_id)s;""",
                                    params)
                    params,
                )
                cur.execute(query)
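Design note: cron() accumulates deltas (sessions_count = sessions_count + N) while weekly_cron() overwrites with a full recount, so the weekly pass corrects any drift the incremental updates accumulate. For reference, the insert-or-update branch in cron() could also be expressed as one statement with ON CONFLICT; a sketch only, assuming projects_stats has a unique constraint on project_id:

# Sketch: single-statement upsert equivalent of the insert/update branch.
UPSERT_SQL = """
INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'))
ON CONFLICT (project_id) DO UPDATE
    SET sessions_count = projects_stats.sessions_count + EXCLUDED.sessions_count,
        events_count   = projects_stats.events_count + EXCLUDED.events_count,
        last_update_at = EXCLUDED.last_update_at;
"""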
@@ -1,12 +1,12 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_github_issue import GithubIntegrationIssue
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.GITHUB


class GitHubIntegration(integration_base.BaseIntegration):
class GitHubIntegration(base.BaseIntegration):

    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
@@ -1,12 +1,12 @@
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
from chalicelib.utils import github_client_v3
from chalicelib.utils.github_client_v3 import github_formatters as formatter


class GithubIntegrationIssue(BaseIntegrationIssue):
    def __init__(self, integration_token):
        self.__client = github_client_v3.githubV3Request(integration_token)
        super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)
    def __init__(self, token):
        self.__client = github_client_v3.githubV3Request(token)
        super(GithubIntegrationIssue, self).__init__("GITHUB", token)

    def get_current_user(self):
        return formatter.user(self.__client.get("/user"))

@@ -28,9 +28,9 @@ class GithubIntegrationIssue(BaseIntegrationIssue):

        return meta

    def create_new_assignment(self, integration_project_id, title, description, assignee,
    def create_new_assignment(self, project_id, title, description, assignee,
                              issue_type):
        repoId = integration_project_id
        repoId = project_id
        assignees = [assignee]
        labels = [str(issue_type)]

@@ -59,11 +59,11 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
    def get_by_ids(self, saved_issues):
        results = []
        for i in saved_issues:
            results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
            results.append(self.get(project_id=i["integrationProjectId"], assignment_id=i["id"]))
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        repoId = integration_project_id
    def get(self, project_id, assignment_id):
        repoId = project_id
        issueNumber = assignment_id
        issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
        issue = formatter.issue(issue)

@@ -72,17 +72,17 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
                            self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
        return issue

    def comment(self, integration_project_id, assignment_id, comment):
        repoId = integration_project_id
    def comment(self, project_id, assignment_id, comment):
        repoId = project_id
        issueNumber = assignment_id
        commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                            body={"body": comment})
        return formatter.comment(commentCreated)

    def get_metas(self, integration_project_id):
    def get_metas(self, project_id):
        current_user = self.get_current_user()
        try:
            users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
            users = self.__client.get(f"/repositories/{project_id}/collaborators")
        except Exception as e:
            users = []
        users = [formatter.user(u) for u in users]

@@ -92,7 +92,7 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
        return {"provider": self.provider.lower(),
                'users': users,
                'issueTypes': [formatter.label(l) for l in
                               self.__client.get(f"/repositories/{integration_project_id}/labels")]
                               self.__client.get(f"/repositories/{project_id}/labels")]
                }

    def get_projects(self):
@@ -1,4 +1,5 @@
import schemas
from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client


@@ -51,10 +52,13 @@ def get_global_integrations_status(tenant_id, user_id, project_id):
                       AND provider='elasticsearch')) AS {schemas.IntegrationType.ELASTICSEARCH.value},
               EXISTS((SELECT 1
                       FROM public.webhooks
                       WHERE type='slack' AND deleted_at ISNULL)) AS {schemas.IntegrationType.SLACK.value},
                       WHERE type='slack' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.SLACK.value},
               EXISTS((SELECT 1
                       FROM public.webhooks
                       WHERE type='msteams' AND deleted_at ISNULL)) AS {schemas.IntegrationType.MS_TEAMS.value};""",
                       WHERE type='msteams' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.MS_TEAMS.value},
               EXISTS((SELECT 1
                       FROM public.integrations
                       WHERE project_id=%(project_id)s AND provider='dynatrace')) AS {schemas.IntegrationType.DYNATRACE.value};""",
            {"user_id": user_id, "tenant_id": tenant_id, "project_id": project_id})
        )
        current_integrations = cur.fetchone()
@@ -1,7 +1,7 @@
from chalicelib.core import integration_github, integration_jira_cloud
from chalicelib.core.issue_tracking import github, jira_cloud
from chalicelib.utils import pg_client

SUPPORTED_TOOLS = [integration_github.PROVIDER, integration_jira_cloud.PROVIDER]
SUPPORTED_TOOLS = [github.PROVIDER, jira_cloud.PROVIDER]


def get_available_integrations(user_id):

@@ -23,7 +23,7 @@ def get_available_integrations(user_id):

def __get_default_integration(user_id):
    current_integrations = get_available_integrations(user_id)
    return integration_github.PROVIDER if current_integrations["github"] else integration_jira_cloud.PROVIDER if \
    return github.PROVIDER if current_integrations["github"] else jira_cloud.PROVIDER if \
        current_integrations["jira"] else None


@@ -35,11 +35,11 @@ def get_integration(tenant_id, user_id, tool=None, for_delete=False):
    tool = tool.upper()
    if tool not in SUPPORTED_TOOLS:
        return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
    if tool == integration_jira_cloud.PROVIDER:
        integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
    if tool == jira_cloud.PROVIDER:
        integration = jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
        if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
            return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
        return None, integration
    elif tool == integration_github.PROVIDER:
        return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
    elif tool == github.PROVIDER:
        return None, github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
    return {"errors": ["lost integration"]}, None
@@ -1,6 +1,6 @@
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper

PROVIDER = schemas.IntegrationType.JIRA

@@ -10,7 +10,7 @@ def obfuscate_string(string):
    return "*" * (len(string) - 4) + string[-4:]


class JIRAIntegration(integration_base.BaseIntegration):
class JIRAIntegration(base.BaseIntegration):
    def __init__(self, tenant_id, user_id):
        self.__tenant_id = tenant_id
        # TODO: enable super-constructor when OAuth is done

@@ -41,6 +41,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
        except Exception as e:
            self._issue_handler = None
            self.integration["valid"] = False
            return {"errors": ["Something went wrong, please check your JIRA credentials."]}
        return self._issue_handler

    # TODO: remove this once jira-oauth is done

@@ -49,8 +50,8 @@ class JIRAIntegration(integration_base.BaseIntegration):
        cur.execute(
            cur.mogrify(
                """SELECT username, token, url
                   FROM public.jira_cloud
                   WHERE user_id=%(user_id)s;""",
                   FROM public.jira_cloud
                   WHERE user_id = %(user_id)s;""",
                {"user_id": self._user_id})
        )
        data = helper.dict_to_camel_case(cur.fetchone())
@@ -94,10 +95,9 @@ class JIRAIntegration(integration_base.BaseIntegration):
    def add(self, username, token, url, obfuscate=False):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            INSERT INTO public.jira_cloud(username, token, user_id,url)
                            VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
                            RETURNING username, token, url;""",
                cur.mogrify(""" \
                            INSERT INTO public.jira_cloud(username, token, user_id, url)
                            VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
                            {"user_id": self._user_id, "username": username,
                             "token": token, "url": url})
            )

@@ -111,9 +111,10 @@ class JIRAIntegration(integration_base.BaseIntegration):
    def delete(self):
        with pg_client.PostgresClient() as cur:
            cur.execute(
                cur.mogrify("""\
                            DELETE FROM public.jira_cloud
                            WHERE user_id=%(user_id)s;""",
                cur.mogrify(""" \
                            DELETE
                            FROM public.jira_cloud
                            WHERE user_id = %(user_id)s;""",
                            {"user_id": self._user_id})
            )
        return {"state": "success"}

@@ -124,7 +125,7 @@ class JIRAIntegration(integration_base.BaseIntegration):
            changes={
                "username": data.username,
                "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
                    else self.integration.token,
                    else self.integration["token"],
                "url": str(data.url)
            },
            obfuscate=True
@@ -1,5 +1,5 @@
from chalicelib.utils import jira_client
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue


class JIRACloudIntegrationIssue(BaseIntegrationIssue):

@@ -9,8 +9,8 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
        self._client = jira_client.JiraManager(self.url, self.username, token, None)
        super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

    def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
        self._client.set_jira_project_id(integration_project_id)
    def create_new_assignment(self, project_id, title, description, assignee, issue_type):
        self._client.set_jira_project_id(project_id)
        data = {
            'summary': title,
            'description': description,

@@ -28,26 +28,26 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
            projects_map[i["integrationProjectId"]].append(i["id"])

        results = []
        for integration_project_id in projects_map:
            self._client.set_jira_project_id(integration_project_id)
        for project_id in projects_map:
            self._client.set_jira_project_id(project_id)
            jql = 'labels = OpenReplay'
            if len(projects_map[integration_project_id]) > 0:
                jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
            if len(projects_map[project_id]) > 0:
                jql += f" AND ID IN ({','.join(projects_map[project_id])})"
            issues = self._client.get_issues(jql, offset=0)
            results += issues
        return {"issues": results}

    def get(self, integration_project_id, assignment_id):
        self._client.set_jira_project_id(integration_project_id)
    def get(self, project_id, assignment_id):
        self._client.set_jira_project_id(project_id)
        return self._client.get_issue_v3(assignment_id)

    def comment(self, integration_project_id, assignment_id, comment):
        self._client.set_jira_project_id(integration_project_id)
    def comment(self, project_id, assignment_id, comment):
        self._client.set_jira_project_id(project_id)
        return self._client.add_comment_v3(assignment_id, comment)

    def get_metas(self, integration_project_id):
    def get_metas(self, project_id):
        meta = {}
        self._client.set_jira_project_id(integration_project_id)
        self._client.set_jira_project_id(project_id)
        meta['issueTypes'] = self._client.get_issue_types()
        meta['users'] = self._client.get_assignable_users()
        return {"provider": self.provider.lower(), **meta}
@@ -1,32 +1,5 @@
from chalicelib.utils import pg_client, helper

ISSUE_TYPES = ['click_rage', 'dead_click', 'excessive_scrolling', 'bad_request', 'missing_resource', 'memory', 'cpu',
               'slow_resource', 'slow_page_load', 'crash', 'ml_cpu', 'ml_memory', 'ml_dead_click', 'ml_click_rage',
               'ml_mouse_thrashing', 'ml_excessive_scrolling', 'ml_slow_resources', 'custom', 'js_exception',
               'custom_event_error', 'js_error']
ORDER_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 0
      WHEN type = 'bad_request' THEN 1
      WHEN type = 'missing_resource' THEN 2
      WHEN type = 'click_rage' THEN 3
      WHEN type = 'dead_click' THEN 4
      WHEN type = 'memory' THEN 5
      WHEN type = 'cpu' THEN 6
      WHEN type = 'crash' THEN 7
      ELSE -1 END)::INTEGER
"""
NAME_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 'Errors'
      WHEN type = 'bad_request' THEN 'Bad Requests'
      WHEN type = 'missing_resource' THEN 'Missing Images'
      WHEN type = 'click_rage' THEN 'Click Rage'
      WHEN type = 'dead_click' THEN 'Dead Clicks'
      WHEN type = 'memory' THEN 'High Memory'
      WHEN type = 'cpu' THEN 'High CPU'
      WHEN type = 'crash' THEN 'Crashes'
      ELSE type::text END)::text
"""


def get(project_id, issue_id):
    with pg_client.PostgresClient() as cur:

@@ -62,20 +35,6 @@ def get_by_session_id(session_id, project_id, issue_type=None):
        return helper.list_to_camel_case(cur.fetchall())


def get_types_by_project(project_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""SELECT type,
                                   {ORDER_QUERY}>=0 AS visible,
                                   {ORDER_QUERY} AS order,
                                   {NAME_QUERY} AS name
                            FROM (SELECT DISTINCT type
                                  FROM public.issues
                                  WHERE project_id = %(project_id)s) AS types
                            ORDER BY "order";""", {"project_id": project_id}))
        return helper.list_to_camel_case(cur.fetchall())


def get_all_types():
    return [
        {
@@ -1,6 +1,10 @@
import logging

from chalicelib.core.sessions import sessions_mobs, sessions_devtool
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core import sessions_mobs, sessions_devtool

logger = logging.getLogger(__name__)


class Actions:

@@ -150,23 +154,23 @@ def get_scheduled_jobs():
def execute_jobs():
    jobs = get_scheduled_jobs()
    for job in jobs:
        print(f"Executing jobId:{job['jobId']}")
        logger.info(f"Executing jobId:{job['jobId']}")
        try:
            if job["action"] == Actions.DELETE_USER_DATA:
                session_ids = __get_session_ids_by_user_ids(project_id=job["projectId"],
                                                            user_ids=[job["referenceId"]])
                if len(session_ids) > 0:
                    print(f"Deleting {len(session_ids)} sessions")
                    logger.info(f"Deleting {len(session_ids)} sessions")
                    __delete_sessions_by_session_ids(session_ids=session_ids)
                    __delete_session_mobs_by_session_ids(session_ids=session_ids, project_id=job["projectId"])
            else:
                raise Exception(f"The action '{job['action']}' not supported.")

            job["status"] = JobStatus.COMPLETED
            print(f"Job completed {job['jobId']}")
            logger.info(f"Job completed {job['jobId']}")
        except Exception as e:
            job["status"] = JobStatus.FAILED
            job["errors"] = str(e)
            print(f"Job failed {job['jobId']}")
            logger.error(f"Job failed {job['jobId']}")

        update(job["jobId"], job)
@@ -1,6 +1,5 @@
from chalicelib.core import log_tools
import requests

from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "bugsnag"
@@ -1,5 +1,5 @@
import boto3
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "cloudwatch"

@@ -19,16 +19,6 @@ def __find_groups(client, token):
    return response["logGroups"] + __find_groups(client, response["nextToken"])


def __make_stream_filter(start_time, end_time):
    def __valid_stream(stream):
        return "firstEventTimestamp" in stream and not (
                stream['firstEventTimestamp'] <= start_time and stream["lastEventTimestamp"] <= start_time
                or stream['firstEventTimestamp'] >= end_time and stream["lastEventTimestamp"] >= end_time
        )

    return __valid_stream


def __find_streams(project_id, log_group, client, token, stream_filter):
    d_args = {"logGroupName": log_group, "orderBy": 'LastEventTime', 'limit': 50}
    if token is not None and len(token) > 0:
@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "datadog"

@@ -1,8 +1,7 @@
import logging

from chalicelib.core.log_tools import log_tools
from elasticsearch import Elasticsearch

from chalicelib.core import log_tools
from schemas import schemas

logger = logging.getLogger(__name__)
@@ -1,6 +1,8 @@
from chalicelib.utils import pg_client, helper
import json

from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client, helper

EXCEPT = ["jira_server", "jira_cloud"]


@@ -60,20 +62,6 @@ def get(project_id, integration):
    return helper.dict_to_camel_case(helper.flatten_nested_dicts(r))


def get_all_by_type(integration):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """\
                SELECT integrations.*
                FROM public.integrations INNER JOIN public.projects USING(project_id)
                WHERE provider = %(provider)s AND projects.deleted_at ISNULL;""",
                {"provider": integration})
        )
        r = cur.fetchall()
        return helper.list_to_camel_case(r, flatten=True)


def edit(project_id, integration, changes):
    if "projectId" in changes:
        changes.pop("project_id")

@@ -108,11 +96,11 @@ def get_all_by_tenant(tenant_id, integration):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT integrations.*
                f"""SELECT integrations.*
                   FROM public.integrations INNER JOIN public.projects USING(project_id)
                   WHERE provider = %(provider)s
                   WHERE provider = %(provider)s AND {TENANT_CONDITION}
                     AND projects.deleted_at ISNULL;""",
                {"provider": integration})
                {"tenant_id": tenant_id, "provider": integration})
        )
        r = cur.fetchall()
        return helper.list_to_camel_case(r, flatten=True)
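TENANT_CONDITION is imported from chalicelib.core.modules and spliced into the SQL as an f-string fragment, while %(tenant_id)s travels in the params dict. Its actual definition is not shown in this diff; a hypothetical illustration of the pattern only:

# Hypothetical: the real fragment lives in chalicelib/core/modules.py and
# is not part of this diff. In a single-tenant build it could simply be
# "TRUE"; in a multi-tenant build, a tenant filter like the one below.
TENANT_CONDITION = "tenant_id = %(tenant_id)s"

sql = f"""SELECT 1
          FROM public.webhooks
          WHERE type = 'slack' AND deleted_at ISNULL AND {TENANT_CONDITION};"""
params = {"tenant_id": 42}  # bound safely by mogrify/execute, not by the f-string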
@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "newrelic"

@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "rollbar"

@@ -1,5 +1,5 @@
import requests
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "sentry"

@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "stackdriver"

@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "sumologic"
@@ -98,17 +98,23 @@ def __edit(project_id, col_index, colname, new_name):
    if col_index not in list(old_metas.keys()):
        return {"errors": ["custom field not found"]}

    with pg_client.PostgresClient() as cur:
        if old_metas[col_index]["key"] != new_name:
    if old_metas[col_index]["key"] != new_name:
        with pg_client.PostgresClient() as cur:
            query = cur.mogrify(f"""UPDATE public.projects
                                    SET {colname} = %(value)s
                                    WHERE project_id = %(project_id)s
                                      AND deleted_at ISNULL
                                    RETURNING {colname};""",
                                    RETURNING {colname},
                                        (SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
                                {"project_id": project_id, "value": new_name})
            cur.execute(query=query)
            new_name = cur.fetchone()[colname]
            row = cur.fetchone()
            new_name = row[colname]
            old_name = row['old_' + colname]
            old_metas[col_index]["key"] = new_name
            projects.rename_metadata_condition(project_id=project_id,
                                               old_metadata_key=old_name,
                                               new_metadata_key=new_name)
    return {"data": old_metas[col_index]}
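The reworked UPDATE captures the pre-update value in the same statement: expressions in RETURNING are evaluated on the new row, but a scalar subquery against the same table still sees the snapshot taken at statement start, so it yields the old value. A standalone sketch of the trick (assumes psycopg2 and a toy projects table; names are illustrative):

import psycopg2
import psycopg2.extras

def rename(conn, project_id: int, new_name: str):
    with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
        cur.execute(
            """UPDATE projects
               SET metadata_1 = %(value)s
               WHERE project_id = %(project_id)s
               RETURNING metadata_1,
                   (SELECT metadata_1 FROM projects
                    WHERE project_id = %(project_id)s) AS old_metadata_1;""",
            {"project_id": project_id, "value": new_name},
        )
        row = cur.fetchone()
        # row["metadata_1"] is the new value; the subquery read the
        # statement-start snapshot, so row["old_metadata_1"] is the old one.
        return row["old_metadata_1"], row["metadata_1"]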
@@ -121,8 +127,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
def delete(tenant_id, project_id, index: int):
    index = int(index)
    old_segments = get(project_id)
    old_segments = [k["index"] for k in old_segments]
    if index not in old_segments:
    old_indexes = [k["index"] for k in old_segments]
    if index not in old_indexes:
        return {"errors": ["custom field not found"]}

    with pg_client.PostgresClient() as cur:

@@ -132,7 +138,8 @@ def delete(tenant_id, project_id, index: int):
                            WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
                            {"project_id": project_id})
        cur.execute(query=query)

        projects.delete_metadata_condition(project_id=project_id,
                                           metadata_key=old_segments[old_indexes.index(index)]["key"])
    return {"data": get(project_id)}
@@ -187,33 +194,6 @@ def search(tenant_id, project_id, key, value):
    return {"data": [k[key] for k in value]}


def get_available_keys(project_id):
    all_metas = get(project_id=project_id)
    return [k["key"] for k in all_metas]


def get_by_session_id(project_id, session_id):
    all_metas = get(project_id=project_id)
    if len(all_metas) == 0:
        return []
    keys = {index_to_colname(k["index"]): k["key"] for k in all_metas}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT {",".join(keys.keys())}
                                FROM public.sessions
                                WHERE project_id= %(project_id)s
                                  AND session_id=%(session_id)s;""",
                            {"session_id": session_id, "project_id": project_id})
        cur.execute(query=query)
        session_metas = cur.fetchall()
        results = []
        for m in session_metas:
            r = {}
            for k in m.keys():
                r[keys[k]] = m[k]
            results.append(r)
        return results


def get_keys_by_projects(project_ids):
    if project_ids is None or len(project_ids) == 0:
        return {}
File diff suppressed because it is too large

api/chalicelib/core/metrics/__init__.py (new file, +10)
@@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental metrics")
else:
    pass
@@ -1,32 +1,17 @@
import json
import logging

from decouple import config
from fastapi import HTTPException, status

import schemas
from chalicelib.core import sessions, funnels, errors, issues, heatmaps, sessions_mobs, product_analytics, \
    custom_metrics_predefined
from chalicelib.core import issues
from chalicelib.core.errors import errors
from chalicelib.core.metrics import heatmaps, product_analytics, funnels
from chalicelib.core.sessions import sessions, sessions_search
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient

logger = logging.getLogger(__name__)
PIE_CHART_GROUP = 5


# TODO: refactor this to split
#  timeseries /
#  table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
#  remove "table of" calls from this function
def __try_live(project_id, data: schemas.CardSchema):
    results = []
    for i, s in enumerate(data.series):
        results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
                                               view_type=data.view_type, metric_type=data.metric_type,
                                               metric_of=data.metric_of, metric_value=data.metric_value))

    return results


def __get_table_of_series(project_id, data: schemas.CardSchema):
@@ -39,60 +24,66 @@ def __get_table_of_series(project_id, data: schemas.CardSchema):
    return results


def __get_funnel_chart(project_id: int, data: schemas.CardFunnel, user_id: int = None):
def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel, user_id: int = None):
    if len(data.series) == 0:
        return {
            "stages": [],
            "totalDropDueToIssues": 0
        }

    return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
                                                      data=data.series[0].filter,
                                                      metric_format=data.metric_format)
    return funnels.get_simple_funnel(project=project,
                                     data=data.series[0].filter,
                                     metric_format=data.metric_format)


def __get_errors_list(project_id, user_id, data: schemas.CardSchema):
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
    if len(data.series) == 0:
        return {
            "total": 0,
            "errors": []
        }
    return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)
    return errors.search(data.series[0].filter, project=project, user_id=user_id)


def __get_sessions_list(project_id, user_id, data: schemas.CardSchema):
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
    if len(data.series) == 0:
        logger.debug("empty series")
        return {
            "total": 0,
            "sessions": []
        }
    return sessions.search_sessions(data=data.series[0].filter, project_id=project_id, user_id=user_id)
    return sessions_search.search_sessions(data=data.series[0].filter, project=project, user_id=user_id)


def __get_heat_map_chart(project_id, user_id, data: schemas.CardHeatMap, include_mobs: bool = True):
def get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
                       include_mobs: bool = True):
    if len(data.series) == 0:
        return None
    data.series[0].filter.filters += data.series[0].filter.events
    data.series[0].filter.events = []
    return heatmaps.search_short_session(project_id=project_id, user_id=user_id,
    return heatmaps.search_short_session(project_id=project.project_id, user_id=user_id,
                                         data=schemas.HeatMapSessionsSearch(
                                             **data.series[0].filter.model_dump()),
                                         include_mobs=include_mobs)


def __get_path_analysis_chart(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
    if len(data.series) == 0:
        data.series.append(
            schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
    elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
        data.series[0].filter = schemas.PathAnalysisSchema()

    return product_analytics.path_analysis(project_id=project_id, data=data)
    return product_analytics.path_analysis(project_id=project.project_id, data=data)


def __get_timeseries_chart(project_id: int, data: schemas.CardTimeSeries, user_id: int = None):
    series_charts = __try_live(project_id=project_id, data=data)
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
    series_charts = []
    for i, s in enumerate(data.series):
        series_charts.append(sessions.search2_series(data=s.filter, project_id=project.project_id, density=data.density,
                                                     metric_type=data.metric_type, metric_of=data.metric_of,
                                                     metric_value=data.metric_value))

    results = [{}] * len(series_charts[0])
    for i in range(len(results)):
        for j, series_chart in enumerate(series_charts):
@@ -105,47 +96,47 @@ def not_supported(**args):
    raise Exception("not supported")


def __get_table_of_user_ids(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_user_ids(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_sessions(project_id: int, data: schemas.CardTable, user_id):
    return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)
def __get_table_of_sessions(project: schemas.ProjectContext, data: schemas.CardTable, user_id):
    return __get_sessions_list(project=project, user_id=user_id, data=data)


def __get_table_of_errors(project_id: int, data: schemas.CardTable, user_id: int):
    return __get_errors_list(project_id=project_id, user_id=user_id, data=data)
def __get_table_of_errors(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
    return __get_errors_list(project=project, user_id=user_id, data=data)


def __get_table_of_issues(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_issues(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_browsers(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_browsers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_devises(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_devises(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_countries(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_countries(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_urls(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_urls(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_referrers(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_referrers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_of_requests(project_id: int, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_requests(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
    return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_chart(project_id: int, data: schemas.CardTable, user_id: int):
def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
    supported = {
        schemas.MetricOfTable.SESSIONS: __get_table_of_sessions,
        schemas.MetricOfTable.ERRORS: __get_table_of_errors,
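__get_table_chart and get_chart (below) both route through a dict of handlers with not_supported as the fallback; supported.get(key, default) avoids a long if/elif chain and raises in exactly one place. A minimal standalone sketch of the dispatch-table pattern (illustrative names only):

def not_supported(**_):
    raise Exception("not supported")

def render_table(project, data, user_id):
    return {"kind": "table"}

SUPPORTED = {"table": render_table}

def get_chart(metric_type, project=None, data=None, user_id=None):
    # Unknown metric types fall through to not_supported
    return SUPPORTED.get(metric_type, not_supported)(project=project, data=data, user_id=user_id)

print(get_chart("table"))   # {'kind': 'table'}
# get_chart("insights")     # would raise Exception("not supported")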
@ -158,93 +149,32 @@ def __get_table_chart(project_id: int, data: schemas.CardTable, user_id: int):
|
|||
        schemas.MetricOfTable.REFERRER: __get_table_of_referrers,
        schemas.MetricOfTable.FETCH: __get_table_of_requests
    }
    return supported.get(data.metric_of, not_supported)(project_id=project_id, data=data, user_id=user_id)
    return supported.get(data.metric_of, not_supported)(project=project, data=data, user_id=user_id)


def get_chart(project_id: int, data: schemas.CardSchema, user_id: int):
    if data.is_predefined:
        return custom_metrics_predefined.get_metric(key=data.metric_of,
                                                    project_id=project_id,
                                                    data=data.model_dump())

def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
    supported = {
        schemas.MetricType.TIMESERIES: __get_timeseries_chart,
        schemas.MetricType.TABLE: __get_table_chart,
        schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
        schemas.MetricType.HEAT_MAP: get_heat_map_chart,
        schemas.MetricType.FUNNEL: __get_funnel_chart,
        schemas.MetricType.INSIGHTS: not_supported,
        schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
    }
    return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)
    return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)


# def __merge_metric_with_data(metric: schemas.CardSchema,
#                              data: schemas.CardSessionsSchema) -> schemas.CardSchema:
#     metric.startTimestamp = data.startTimestamp
#     metric.endTimestamp = data.endTimestamp
#     metric.page = data.page
#     metric.limit = data.limit
#     metric.density = data.density
#     if data.series is not None and len(data.series) > 0:
#         metric.series = data.series
#
#     # if len(data.filters) > 0:
#     #     for s in metric.series:
#     #         s.filter.filters += data.filters
#     # metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
#     return metric


def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    # No need for this because UI is sending the full payload
    # card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    # if card is None:
    #     return None
    # metric: schemas.CardSchema = schemas.CardSchema(**card)
    # metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
def get_sessions_by_card_id(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
    if not card_exists(metric_id=metric_id, project_id=project.project_id, user_id=user_id):
        return None
    results = []
    for s in data.series:
        results.append({"seriesId": s.series_id, "seriesName": s.name,
                        **sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
                        **sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})

    return results


def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    # No need for this because UI is sending the full payload
    # raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    # if raw_metric is None:
    #     return None
    # metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
    # metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    # if metric is None:
    #     return None
    if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
        return None
    for s in data.series:
        return {"seriesId": s.series_id, "seriesName": s.name,
                **funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}


def get_errors_list(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    # No need for this because UI is sending the full payload
    # raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    # if raw_metric is None:
    #     return None
    # metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
    # metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    # if metric is None:
    #     return None
    if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
        return None
    for s in data.series:
        return {"seriesId": s.series_id, "seriesName": s.name,
                **errors.search(data=s.filter, project_id=project_id, user_id=user_id)}


def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
def get_sessions(project: schemas.ProjectContext, user_id, data: schemas.CardSessionsSchema):
    results = []
    if len(data.series) == 0:
        return results
@@ -254,85 +184,45 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
        s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))

        results.append({"seriesId": None, "seriesName": s.name,
                        **sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
                        **sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})

    return results


def __get_funnel_issues(project_id: int, user_id: int, data: schemas.CardFunnel):
    if len(data.series) == 0:
        return []
    data.series[0].filter.startTimestamp = data.startTimestamp
    data.series[0].filter.endTimestamp = data.endTimestamp
    data = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
    return data


def __get_path_analysis_issues(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
    if len(data.filters) > 0 or len(data.series) > 0:
        filters = [f.model_dump(by_alias=True) for f in data.filters] \
                  + [f.model_dump(by_alias=True) for f in data.series[0].filter.filters]
    else:
        return []

    search_data = schemas.SessionsSearchPayloadSchema(
        startTimestamp=data.startTimestamp,
        endTimestamp=data.endTimestamp,
        limit=data.limit,
        page=data.page,
        filters=filters
    )
    # ---- To make issues response close to the chart response
    search_data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                                 operator=schemas.MathOperator.GREATER,
                                                                 value=[1]))
    if len(data.start_point) == 0:
        search_data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
                                                                    operator=schemas.SearchEventOperator.IS_ANY,
                                                                    value=[]))
    # ---- End

    for s in data.excludes:
        search_data.events.append(schemas.SessionSearchEventSchema2(type=s.type,
                                                                    operator=schemas.SearchEventOperator.NOT_ON,
                                                                    value=s.value))
    result = sessions.search_table_of_individual_issues(project_id=project_id, data=search_data)
    return result


def get_issues(project_id: int, user_id: int, data: schemas.CardSchema):
    if data.is_predefined:
        return not_supported()
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
    if data.metric_of == schemas.MetricOfTable.ISSUES:
        return __get_table_of_issues(project_id=project_id, user_id=user_id, data=data)
        return __get_table_of_issues(project=project, user_id=user_id, data=data)
    supported = {
        schemas.MetricType.TIMESERIES: not_supported,
        schemas.MetricType.TABLE: not_supported,
        schemas.MetricType.HEAT_MAP: not_supported,
        schemas.MetricType.FUNNEL: __get_funnel_issues,
        schemas.MetricType.INSIGHTS: not_supported,
        schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_issues,
        schemas.MetricType.PATH_ANALYSIS: not_supported,
    }
    return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)
    return supported.get(data.metric_type, not_supported)()


def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
    r = {"start_point": [s.model_dump() for s in data.start_point],
         "start_type": data.start_type,
         "excludes": [e.model_dump() for e in data.excludes],
         "hideExcess": data.hide_excess}
def get_global_card_info(data: schemas.CardSchema):
    r = {"hideExcess": data.hide_excess, "compareTo": data.compare_to, "rows": data.rows}
    return r


def create_card(project_id, user_id, data: schemas.CardSchema, dashboard=False):
def get_path_analysis_card_info(data: schemas.CardPathAnalysis):
    r = {"start_point": [s.model_dump() for s in data.start_point],
         "start_type": data.start_type,
         "excludes": [e.model_dump() for e in data.excludes],
         "rows": data.rows}
    return r


def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
    with pg_client.PostgresClient() as cur:
        session_data = None
        if data.metric_type == schemas.MetricType.HEAT_MAP:
            if data.session_id is not None:
                session_data = {"sessionId": data.session_id}
            else:
                session_data = __get_heat_map_chart(project_id=project_id, user_id=user_id,
                                                    data=data, include_mobs=False)
                session_data = get_heat_map_chart(project=project, user_id=user_id,
                                                  data=data, include_mobs=False)
                if session_data is not None:
                    session_data = {"sessionId": session_data["sessionId"]}
@@ -343,11 +233,12 @@ def create_card(project_id, user_id, data: schemas.CardSchema, dashboard=False):
            _data[f"index_{i}"] = i
            _data[f"filter_{i}"] = s.filter.json()
        series_len = len(data.series)
        params = {"user_id": user_id, "project_id": project_id, **data.model_dump(), **_data}
        params["default_config"] = json.dumps(data.default_config.model_dump())
        params["card_info"] = None
        params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
                  "default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
        params["card_info"] = get_global_card_info(data=data)
        if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
            params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
            params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
        params["card_info"] = json.dumps(params["card_info"])

        query = """INSERT INTO metrics (project_id, user_id, name, is_public,
                                        view_type, metric_type, metric_of, metric_value,
@@ -370,7 +261,7 @@ def create_card(project_id, user_id, data: schemas.CardSchema, dashboard=False):
        r = cur.fetchone()
        if dashboard:
            return r["metric_id"]
        return {"data": get_card(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
        return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}


def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
@@ -407,16 +298,18 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
        if i not in u_series_ids:
            d_series_ids.append(i)
    params["d_series_ids"] = tuple(d_series_ids)
    params["card_info"] = None
    params["session_data"] = json.dumps(metric["data"])
    params["card_info"] = get_global_card_info(data=data)
    if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
        params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
        params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
    elif data.metric_type == schemas.MetricType.HEAT_MAP:
        if data.session_id is not None:
            params["session_data"] = json.dumps({"sessionId": data.session_id})
        elif metric.get("data") and metric["data"].get("sessionId"):
            params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})

    params["card_info"] = json.dumps(params["card_info"])

    with pg_client.PostgresClient() as cur:
        sub_queries = []
        if len(n_series) > 0:
@@ -459,6 +352,100 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
    return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)


def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
    constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
    params = {
        "project_id": project_id,
        "user_id": user_id,
        "offset": (data.page - 1) * data.limit,
        "limit": data.limit,
    }
    if data.mine_only:
        constraints.append("user_id = %(user_id)s")
    else:
        constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
    if data.shared_only:
        constraints.append("is_public")

    if data.filter is not None:
        if data.filter.type:
            constraints.append("metrics.metric_type = %(filter_type)s")
            params["filter_type"] = data.filter.type
        if data.filter.query and len(data.filter.query) > 0:
            constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
            params["filter_query"] = helper.values_for_operator(
                value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
            )

    with pg_client.PostgresClient() as cur:
        sub_join = ""
        if include_series:
            sub_join = """LEFT JOIN LATERAL (
                              SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
                              FROM metric_series
                              WHERE metric_series.metric_id = metrics.metric_id
                                AND metric_series.deleted_at ISNULL
                          ) AS metric_series ON (TRUE)"""

        sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
            else "created_at"
        # change ascend to asc and descend to desc
        sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
        if sort_order == "ascend":
            sort_order = "asc"
        elif sort_order == "descend":
            sort_order = "desc"

        query = cur.mogrify(
            f"""SELECT count(1) OVER () AS total, metric_id, project_id, user_id, name, is_public, created_at, edited_at,
                       metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
                       dashboards, owner_email, owner_name, default_config AS config, thumbnail
                FROM metrics
                         {sub_join}
                         LEFT JOIN LATERAL (
                    SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name), '[]'::jsonb) AS dashboards
                    FROM (
                        SELECT DISTINCT dashboard_id, name, is_public
                        FROM dashboards
                                 INNER JOIN dashboard_widgets USING (dashboard_id)
                        WHERE deleted_at ISNULL
                          AND dashboard_widgets.metric_id = metrics.metric_id
                          AND project_id = %(project_id)s
                          AND ((dashboards.user_id = %(user_id)s OR is_public))
                    ) AS connected_dashboards
                ) AS connected_dashboards ON (TRUE)
                         LEFT JOIN LATERAL (
                    SELECT email AS owner_email, name AS owner_name
                    FROM users
                    WHERE deleted_at ISNULL
                      AND users.user_id = metrics.user_id
                ) AS owner ON (TRUE)
                WHERE {" AND ".join(constraints)}
                ORDER BY {sort_column} {sort_order}
                LIMIT %(limit)s OFFSET %(offset)s;""",
            params
        )
        cur.execute(query)
        rows = cur.fetchall()
        if len(rows) > 0:
            total = rows[0]["total"]
            if include_series:
                for r in rows:
                    r.pop("total")
                    for s in r.get("series", []):
                        s["filter"] = helper.old_search_payload_to_flat(s["filter"])
            else:
                for r in rows:
                    r.pop("total")
                    r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
                    r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
            rows = helper.list_to_camel_case(rows)
        else:
            total = 0

    return {"total": total, "list": rows}
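A note on the pagination pattern in search_metrics above: count(1) OVER () is a window aggregate over the whole result set, so every returned row carries the total number of matches; the code reads it once from rows[0]["total"] and then pops the column off each row. A minimal standalone illustration of the same idea (table and column names hypothetical, not taken from this change):

SELECT count(1) OVER () AS total, metric_id, name
FROM metrics
ORDER BY created_at DESC
LIMIT 10 OFFSET 20;
-- total reflects all matching rows, not just the 10 returned on this page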


def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
    constraints = ["metrics.project_id = %(project_id)s",
                   "metrics.deleted_at ISNULL"]
@@ -551,8 +538,16 @@ def delete_card(project_id, metric_id, user_id):
    return {"state": "success"}


def __get_global_attributes(row):
    if row is None or row.get("cardInfo") is None:
        return row
    card_info = row.get("cardInfo", {})
    row["compareTo"] = card_info["compareTo"] if card_info.get("compareTo") is not None else []
    return row


def __get_path_analysis_attributes(row):
    card_info = row.pop("cardInfo")
    card_info = row.get("cardInfo", {})
    row["excludes"] = card_info.get("excludes", [])
    row["startPoint"] = card_info.get("startPoint", [])
    row["startType"] = card_info.get("startType", "start")

@@ -605,6 +600,8 @@ def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data:

    row = helper.dict_to_camel_case(row)
    if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
        row = __get_path_analysis_attributes(row=row)
    row = __get_global_attributes(row=row)
    row.pop("cardInfo")
    return row
@@ -646,17 +643,7 @@ def change_state(project_id, metric_id, user_id, status):

def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
                                 data: schemas.CardSessionsSchema
                                 # , range_value=None, start_date=None, end_date=None
                                 ):
    # No need for this because UI is sending the full payload
    # card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    # if card is None:
    #     return None
    # metric: schemas.CardSchema = schemas.CardSchema(**card)
    # metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
    # if metric is None:
    #     return None
                                 data: schemas.CardSessionsSchema):
    if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
        return None
    for s in data.series:
@@ -687,8 +674,8 @@ def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
            "issue": issue}


def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, include_data=True)
def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric: dict = get_card(metric_id=metric_id, project_id=project.project_id, user_id=user_id, include_data=True)

    if raw_metric is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
@@ -698,20 +685,16 @@ def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
    raw_metric["density"] = data.density
    metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)

    if metric.is_predefined:
        return custom_metrics_predefined.get_metric(key=metric.metric_of,
                                                    project_id=project_id,
                                                    data=data.model_dump())
    elif metric.metric_type == schemas.MetricType.HEAT_MAP:
    if metric.metric_type == schemas.MetricType.HEAT_MAP:
        if raw_metric["data"] and raw_metric["data"].get("sessionId"):
            return heatmaps.get_selected_session(project_id=project_id,
            return heatmaps.get_selected_session(project_id=project.project_id,
                                                 session_id=raw_metric["data"]["sessionId"])
        else:
            return heatmaps.search_short_session(project_id=project_id,
            return heatmaps.search_short_session(project_id=project.project_id,
                                                 data=schemas.HeatMapSessionsSearch(**metric.model_dump()),
                                                 user_id=user_id)

    return get_chart(project_id=project_id, data=metric, user_id=user_id)
    return get_chart(project=project, data=metric, user_id=user_id)


def card_exists(metric_id, project_id, user_id) -> bool:
@@ -1,7 +1,7 @@
import json

import schemas
from chalicelib.core import custom_metrics
from chalicelib.core.metrics import custom_metrics
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@@ -149,30 +149,6 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
    return helper.dict_to_camel_case(row)


def get_widget(project_id, user_id, dashboard_id, widget_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """SELECT metrics.*, metric_series.series
                      FROM dashboard_widgets
                               INNER JOIN dashboards USING (dashboard_id)
                               INNER JOIN metrics USING (metric_id)
                               LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
                                                  FROM metric_series
                                                  WHERE metric_series.metric_id = metrics.metric_id
                                                    AND metric_series.deleted_at ISNULL
                                                  ) AS metric_series ON (TRUE)
                      WHERE dashboard_id = %(dashboard_id)s
                        AND widget_id = %(widget_id)s
                        AND (dashboards.is_public OR dashboards.user_id = %(userId)s)
                        AND dashboards.deleted_at IS NULL
                        AND metrics.deleted_at ISNULL
                        AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)
                        AND (metrics.is_public OR metrics.user_id = %(userId)s);"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
        return helper.dict_to_camel_case(row)


def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashboardPayloadSchema):
    with pg_client.PostgresClient() as cur:
        pg_query = """INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
@@ -228,9 +204,9 @@ def pin_dashboard(project_id, user_id, dashboard_id):
    return helper.dict_to_camel_case(row)


def create_metric_add_widget(project_id, user_id, dashboard_id, data: schemas.CardSchema):
    metric_id = custom_metrics.create_card(project_id=project_id, user_id=user_id, data=data, dashboard=True)
    return add_widget(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id,
def create_metric_add_widget(project: schemas.ProjectContext, user_id, dashboard_id, data: schemas.CardSchema):
    metric_id = custom_metrics.create_card(project=project, user_id=user_id, data=data, dashboard=True)
    return add_widget(project_id=project.project_id, user_id=user_id, dashboard_id=dashboard_id,
                      data=schemas.AddWidgetToDashboardPayloadSchema(metricId=metric_id))

# def make_chart_widget(dashboard_id, project_id, user_id, widget_id, data: schemas.CardChartSchema):
@@ -1,7 +1,7 @@
from typing import List

import schemas
from chalicelib.core import significance
from chalicelib.core.metrics.modules import significance
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
@@ -35,30 +35,6 @@ def __fix_stages(f_events: List[schemas.SessionSearchEventSchema2]):
    return events


# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema,
                                       metric_format: schemas.MetricExtendedFormatType):
    data.events = filter_stages(__parse_events(data.events))
    data.events = __fix_stages(data.events)
    if len(data.events) == 0:
        return {"stages": [], "totalDropDueToIssues": 0}
    insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data,
                                                                       project_id=project_id,
                                                                       metric_format=metric_format)
    insights = helper.list_to_camel_case(insights)
    if len(insights) > 0:
        if metric_format == schemas.MetricFormatType.SESSION_COUNT and total_drop_due_to_issues > (
                insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]):
            total_drop_due_to_issues = insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]
        elif metric_format == schemas.MetricExtendedFormatType.USER_COUNT and total_drop_due_to_issues > (
                insights[0]["usersCount"] - insights[-1]["usersCount"]):
            total_drop_due_to_issues = insights[0]["usersCount"] - insights[-1]["usersCount"]
        insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
    return {"stages": insights,
            "totalDropDueToIssues": total_drop_due_to_issues}


# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
    data.events = filter_stages(data.events)
    data.events = __fix_stages(data.events)
@@ -69,3 +45,16 @@ def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
            "issues": helper.dict_to_camel_case(
                significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=1,
                                             last_stage=len(data.events)))}


def get_simple_funnel(project: schemas.ProjectContext, data: schemas.CardSeriesFilterSchema,
                      metric_format: schemas.MetricExtendedFormatType):
    data.events = filter_stages(__parse_events(data.events))
    data.events = __fix_stages(data.events)
    if len(data.events) == 0:
        return {"stages": [], "totalDropDueToIssues": 0}
    insights = significance.get_simple_funnel(filter_d=data,
                                              project=project,
                                              metric_format=metric_format)

    return {"stages": insights}
api/chalicelib/core/metrics/heatmaps/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental heatmaps")
    from .heatmaps_ch import *
else:
    from .heatmaps import *
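A note on the toggle above: decouple's config() resolves EXP_METRICS from the process environment or a .env file, so the heatmaps backend is chosen once at import time. As a hypothetical deployment example (not part of this change), setting EXP_METRICS=true in .env makes every importer of this package load heatmaps_ch instead of the PostgreSQL implementation, with no code change.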
@@ -1,28 +1,35 @@
import logging

import schemas
from chalicelib.core import sessions_mobs, sessions, events
from chalicelib.core import sessions
from chalicelib.core.sessions import sessions_mobs
from chalicelib.utils import pg_client, helper
# from chalicelib.utils import sql_helper as sh
from chalicelib.utils import sql_helper as sh

logger = logging.getLogger(__name__)


def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
    if data.url is None or data.url == "":
        return []
    args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
            "project_id": project_id, "url": data.url}
    constraints = ["sessions.project_id = %(project_id)s",
                   "(url = %(url)s OR path= %(url)s)",
                   "clicks.timestamp >= %(startDate)s",
                   "clicks.timestamp <= %(endDate)s",
                   "start_ts >= %(startDate)s",
                   "start_ts <= %(endDate)s",
                   "duration IS NOT NULL",
                   "normalized_x IS NOT NULL"]
    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("path= %(url)s")
    else:
        constraints.append("path ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = "events.clicks INNER JOIN sessions USING (session_id)"
    has_click_rage_filter = False
    # TODO: is this used ?
    # has_click_rage_filter = False
    # if len(data.filters) > 0:
    #     for i, f in enumerate(data.filters):
    #         if f.type == schemas.FilterType.issue and len(f.value) > 0:
@@ -49,15 +56,15 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
    #                                                    f.value, value_key=f_k))
    #             constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
    #                                                    f.value, value_key=f_k))

    if data.click_rage and not has_click_rage_filter:
        constraints.append("""(issues.session_id IS NULL
                             OR (issues.timestamp >= %(startDate)s
                                 AND issues.timestamp <= %(endDate)s
                                 AND mis.project_id = %(project_id)s
                                 AND mis.type='click_rage'))""")
        query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
                         LEFT JOIN issues AS mis USING (issue_id)"""
    # TODO: change this once click-rage is fixed
    # if data.click_rage and not has_click_rage_filter:
    #     constraints.append("""(issues.session_id IS NULL
    #                          OR (issues.timestamp >= %(startDate)s
    #                              AND issues.timestamp <= %(endDate)s
    #                              AND mis.project_id = %(project_id)s
    #                              AND mis.type='click_rage'))""")
    #     query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
    #                      LEFT JOIN issues AS mis USING (issue_id)"""
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(f"""SELECT normalized_x, normalized_y
                                FROM {query_from}
@@ -83,8 +90,13 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"session_id": session_id, "url": data.url}
    constraints = ["session_id = %(session_id)s",
                   "(url = %(url)s OR path= %(url)s)",
                   "normalized_x IS NOT NULL"]
    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("path= %(url)s")
    else:
        constraints.append("path ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = "events.clicks"

    with pg_client.PostgresClient() as cur:
@@ -110,8 +122,13 @@ def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):

def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"session_id": session_id, "url": data.url}
    constraints = ["session_id = %(session_id)s",
                   "(url = %(url)s OR path= %(url)s)"]
    constraints = ["session_id = %(session_id)s"]
    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("path= %(url)s")
    else:
        constraints.append("path ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = "events.clicks"

    with pg_client.PostgresClient() as cur:
@@ -143,29 +160,93 @@ s.start_ts,
s.duration"""


def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
                start_time: int,
                end_time: int) -> str | None:
    full_args = {
        "sessionId": session_id,
        "projectId": project_id,
        "start_time": start_time,
        "end_time": end_time,
    }
    sub_condition = ["session_id = %(sessionId)s"]
    if location_condition and len(location_condition.value) > 0:
        f_k = "LOC"
        op = sh.get_sql_operator(location_condition.operator)
        full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
        sub_condition.append(
            sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
                                value_key=f_k))
    with pg_client.PostgresClient() as cur:
        main_query = cur.mogrify(f"""WITH paths AS (SELECT DISTINCT path
                                                    FROM events.clicks
                                                    WHERE {" AND ".join(sub_condition)})
                                     SELECT path, COUNT(1) AS count
                                     FROM events.clicks
                                              INNER JOIN public.sessions USING (session_id)
                                              INNER JOIN paths USING (path)
                                     WHERE sessions.project_id = %(projectId)s
                                       AND clicks.timestamp >= %(start_time)s
                                       AND clicks.timestamp <= %(end_time)s
                                       AND start_ts >= %(start_time)s
                                       AND start_ts <= %(end_time)s
                                       AND duration IS NOT NULL
                                     GROUP BY path
                                     ORDER BY count DESC
                                     LIMIT 1;""", full_args)
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            cur.execute(main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION -----------")
            logger.warning(main_query.decode('UTF-8'))
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(full_args)
            logger.warning("--------------------")
            raise err

        url = cur.fetchone()
        if url is None:
            return None
        return url["path"]


def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
                         include_mobs: bool = True, exclude_sessions: list[str] = [],
                         _depth: int = 3):
    no_platform = True
    no_location = True
    location_condition = None
    no_click = True
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM:
            no_platform = False
            break
    for f in data.events:
        if f.type == schemas.EventType.LOCATION:
            no_location = False
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
            location_condition = f.model_copy()
        elif f.type == schemas.EventType.CLICK:
            no_click = False
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
        if location_condition and not no_click:
            break

    if no_platform:
        data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
                                                              value=[schemas.PlatformType.DESKTOP],
                                                              operator=schemas.SearchEventOperator.IS))
    if no_location:
    if not location_condition:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))
    if no_click:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))

    data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                          value=[0],

@@ -183,7 +264,8 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
        main_query = cur.mogrify(f"""SELECT *
                                     FROM (SELECT {SESSION_PROJECTION_COLS}
                                           {query_part}
                                           ORDER BY {data.sort} {data.order.value}
                                           --ignoring the sort made the query faster (from 6s to 100ms)
                                           --ORDER BY {data.sort} {data.order.value}
                                           LIMIT 20) AS raw
                                     ORDER BY random()
                                     LIMIT 1;""", full_args)

@@ -202,6 +284,13 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,

        session = cur.fetchone()
        if session:
            if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
                session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
                                              location_condition=location_condition,
                                              start_time=data.startTimestamp, end_time=data.endTimestamp)
            else:
                session["path"] = location_condition.value[0]

            if include_mobs:
                session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
                session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
api/chalicelib/core/metrics/heatmaps/heatmaps_ch.py (new file, 385 lines)
@@ -0,0 +1,385 @@
import logging

from decouple import config

import schemas
from chalicelib.core import events
from chalicelib.core.metrics.modules import sessions, sessions_mobs
from chalicelib.utils import sql_helper as sh

from chalicelib.utils import pg_client, helper, ch_client, exp_ch_helper

logger = logging.getLogger(__name__)


def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
    if data.url is None or data.url == "":
        return []
    args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
            "project_id": project_id, "url": data.url}
    constraints = [
        "main_events.project_id = toUInt16(%(project_id)s)",
        "main_events.created_at >= toDateTime(%(startDate)s / 1000)",
        "main_events.created_at <= toDateTime(%(endDate)s / 1000)",
        "main_events.`$event_name` = 'CLICK'",
        "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
    ]

    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(data.startTimestamp)} AS main_events"
    # TODO: is this used ?
    # has_click_rage_filter = False
    # if len(data.filters) > 0:
    #     for i, f in enumerate(data.filters):
    #         if f.type == schemas.FilterType.issue and len(f.value) > 0:
    #             has_click_rage_filter = True
    #             query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
    #                              INNER JOIN issues AS mis USING (issue_id)
    #                              INNER JOIN LATERAL (
    #                                  SELECT COUNT(1) AS real_count
    #                                  FROM events.clicks AS sc
    #                                           INNER JOIN sessions as ss USING (session_id)
    #                                  WHERE ss.project_id = 2
    #                                    AND (sc.url = %(url)s OR sc.path = %(url)s)
    #                                    AND sc.timestamp >= %(startDate)s
    #                                    AND sc.timestamp <= %(endDate)s
    #                                    AND ss.start_ts >= %(startDate)s
    #                                    AND ss.start_ts <= %(endDate)s
    #                                    AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
    #             constraints += ["mis.project_id = %(project_id)s",
    #                             "issues.timestamp >= %(startDate)s",
    #                             "issues.timestamp <= %(endDate)s"]
    #             f_k = f"issue_value{i}"
    #             args = {**args, **sh.multi_values(f.value, value_key=f_k)}
    #             constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
    #                                                    f.value, value_key=f_k))
    #             constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
    #                                                    f.value, value_key=f_k))
    # TODO: change this once click-rage is fixed
    # if data.click_rage and not has_click_rage_filter:
    #     constraints.append("""(issues_t.session_id IS NULL
    #                          OR (issues_t.datetime >= toDateTime(%(startDate)s/1000)
    #                              AND issues_t.datetime <= toDateTime(%(endDate)s/1000)
    #                              AND issues_t.project_id = toUInt16(%(project_id)s)
    #                              AND issues_t.event_type = 'ISSUE'
    #                              AND issues_t.project_id = toUInt16(%(project_id)s)
    #                              AND mis.project_id = toUInt16(%(project_id)s)
    #                              AND mis.type='click_rage'))""")
    #     query_from += """ LEFT JOIN experimental.events AS issues_t ON (main_events.session_id=issues_t.session_id)
    #                       LEFT JOIN experimental.issues AS mis ON (issues_t.issue_id=mis.issue_id)"""
    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT
                                         JSON_VALUE(CAST(`$properties` AS String), '$.normalized_x') AS normalized_x,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.normalized_y') AS normalized_y
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)}
                                     LIMIT 500;""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP 2 SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)
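Worth flagging in the ClickHouse variant above: click coordinates live inside the $properties JSON column, and ClickHouse's JSON_VALUE always returns a String, so normalized_x/normalized_y come back as strings here (the PostgreSQL version reads typed columns instead), and any numeric consumer has to cast. A minimal sketch of the extraction with a literal standing in for the column (illustrative, not from this diff):

SELECT JSON_VALUE('{"normalized_x": 42.5}', '$.normalized_x') AS normalized_x;
-- returns the String '42.5'; wrap in toFloat64OrNull(...) where a number is required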


def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"project_id": project_id, "session_id": session_id, "url": data.url}
    constraints = [
        "main_events.project_id = toUInt16(%(project_id)s)",
        "main_events.session_id = %(session_id)s",
        "main_events.`$event_name`='CLICK'",
        "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
    ]
    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT main_events.normalized_x AS normalized_x,
                                            main_events.normalized_y AS normalized_y
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)};""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)


def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"project_id": project_id, "session_id": session_id, "url": data.url}
    constraints = ["main_events.project_id = toUInt16(%(project_id)s)",
                   "main_events.session_id = %(session_id)s",
                   "main_events.`$event_name`='CLICK'"]

    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT CAST(`$properties`.selector AS String) AS selector,
                                            COUNT(1) AS count
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)}
                                     GROUP BY 1
                                     ORDER BY count DESC;""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)


# use CH
SESSION_PROJECTION_COLS = """s.project_id,
s.session_id AS session_id,
toUnixTimestamp(s.datetime)*1000 AS start_ts,
s.duration AS duration"""


def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
                start_time: int,
                end_time: int) -> str | None:
    full_args = {
        "sessionId": session_id,
        "projectId": project_id,
        "start_time": start_time,
        "end_time": end_time,
    }
    sub_condition = ["session_id = %(sessionId)s", "`$event_name` = 'CLICK'", "project_id = %(projectId)s"]
    if location_condition and len(location_condition.value) > 0:
        f_k = "LOC"
        op = sh.get_sql_operator(location_condition.operator)
        full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
        sub_condition.append(
            sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
                                value_key=f_k))
    with ch_client.ClickHouseClient() as cur:
        main_query = cur.format(query=f"""WITH paths AS (
                                              SELECT DISTINCT
                                                  JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url_path
                                              FROM product_analytics.events
                                              WHERE {" AND ".join(sub_condition)}
                                          )
                                          SELECT
                                              paths.url_path,
                                              COUNT(*) AS count
                                          FROM product_analytics.events
                                              INNER JOIN paths
                                                  ON JSON_VALUE(CAST(product_analytics.events.$properties AS String), '$.url_path') = paths.url_path
                                          WHERE `$event_name` = 'CLICK'
                                            AND project_id = %(projectId)s
                                            AND created_at >= toDateTime(%(start_time)s / 1000)
                                            AND created_at <= toDateTime(%(end_time)s / 1000)
                                          GROUP BY paths.url_path
                                          ORDER BY count DESC
                                          LIMIT 1;""",
                                parameters=full_args)
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            url = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION CH-----------")
            logger.warning(main_query.decode('UTF-8'))
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(full_args)
            logger.warning("--------------------")
            raise err

        if url is None or len(url) == 0:
            return None
        return url[0]["url_path"]


def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
                         include_mobs: bool = True, exclude_sessions: list[str] = [],
                         _depth: int = 3):
    no_platform = True
    location_condition = None
    no_click = True
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM:
            no_platform = False
            break
    for f in data.events:
        if f.type == schemas.EventType.LOCATION:
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
            location_condition = f.model_copy()
        elif f.type == schemas.EventType.CLICK:
            no_click = False
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
        if location_condition and not no_click:
            break

    if no_platform:
        data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
                                                              value=[schemas.PlatformType.DESKTOP],
                                                              operator=schemas.SearchEventOperator.IS))
    if not location_condition:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))
    if no_click:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))

    data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                          value=[0],
                                                          operator=schemas.MathOperator.GREATER))

    full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
                                                           favorite_only=data.bookmarked, issue=None,
                                                           project_id=project_id, user_id=user_id)
    full_args["exclude_sessions"] = tuple(exclude_sessions)
    if len(exclude_sessions) > 0:
        query_part += "\n AND session_id NOT IN (%(exclude_sessions)s)"
    with ch_client.ClickHouseClient() as cur:
        data.order = schemas.SortOrderType.DESC
        data.sort = 'duration'
        main_query = cur.format(query=f"""SELECT *
                                          FROM (SELECT {SESSION_PROJECTION_COLS}
                                                {query_part}
                                                -- ORDER BY {data.sort} {data.order.value}
                                                LIMIT 20) AS raw
                                          ORDER BY rand()
                                          LIMIT 1;""",
                                parameters=full_args)
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(main_query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data.model_dump_json())
            logger.warning("--------------------")
            raise err

    if len(session) > 0:
        session = session[0]
        if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
            session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
                                          location_condition=location_condition,
                                          start_time=data.startTimestamp, end_time=data.endTimestamp)
        else:
            session["path"] = location_condition.value[0]

        if include_mobs:
            session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
            session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
            if _depth > 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
                return search_short_session(data=data, project_id=project_id, user_id=user_id,
                                            include_mobs=include_mobs,
                                            exclude_sessions=exclude_sessions + [session["session_id"]],
                                            _depth=_depth - 1)
            elif _depth == 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
                logger.info("couldn't find an existing replay after 3 iterations for heatmap")

        session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
                                                     event_type=schemas.EventType.LOCATION)
    else:
        return None

    return helper.dict_to_camel_case(session)
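The _depth parameter above implements a bounded retry rather than a loop: when the randomly picked session turns out to have no stored DOM/mob replay files, search_short_session calls itself with that session appended to exclude_sessions and _depth - 1, giving up after three attempts (hence the "after 3 iterations" log message). Callers are expected to rely on the defaults; the recursion is an internal detail.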


def get_selected_session(project_id, session_id):
    with ch_client.ClickHouseClient() as cur:
        main_query = cur.format(query=f"""SELECT {SESSION_PROJECTION_COLS}
                                          FROM experimental.sessions AS s
                                          WHERE session_id=%(session_id)s;""",
                                parameters={"session_id": session_id})
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP GET SELECTED SESSION QUERY EXCEPTION -----------")
            logger.warning(main_query.decode('UTF-8'))
            raise err
        if len(session) > 0:
            session = session[0]
        else:
            session = None

    if session:
        session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
        session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
        if len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
            session["_issue"] = "mob file not found"
            logger.info("can't find selected mob file for heatmap")
        session['events'] = get_page_events(session_id=session["session_id"], project_id=project_id)

    return helper.dict_to_camel_case(session)


def get_page_events(session_id, project_id):
    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT
                                         event_id AS message_id,
                                         toUnixTimestamp(created_at)*1000 AS timestamp,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_host') AS host,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS path,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS value,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url,
                                         'LOCATION' AS type
                                     FROM product_analytics.events
                                     WHERE session_id = %(session_id)s
                                       AND `$event_name`='LOCATION'
                                       AND project_id= %(project_id)s
                                     ORDER BY created_at,message_id;""",
                           parameters={"session_id": session_id, "project_id": project_id})

        rows = cur.execute(query=query)
        rows = helper.list_to_camel_case(rows)
        return rows
api/chalicelib/core/metrics/modules/__init__.py (new file, 12 lines)
@@ -0,0 +1,12 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    import chalicelib.core.sessions.sessions_ch as sessions
else:
    import chalicelib.core.sessions.sessions_pg as sessions

from chalicelib.core.sessions import sessions_mobs
api/chalicelib/core/metrics/modules/significance/__init__.py (new file, 10 lines)
@@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

from .significance import *

if config("EXP_METRICS", cast=bool, default=False):
    from .significance_ch import *
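One subtlety in this __init__.py: from .significance import * runs unconditionally first, so when EXP_METRICS is set, the second star-import rebinds whichever public names the two modules share, while any PG-only helpers stay visible. This is ordinary Python shadowing between star-imports (subject to each module's __all__, if one is defined), and it is what lets the ClickHouse module override only the functions it reimplements.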
@@ -1,20 +1,15 @@
import logging

import schemas
from chalicelib.core import events, metadata
from chalicelib.utils import sql_helper as sh

"""
todo: remove LIMIT from the query
"""

from typing import List
import math
import warnings
from collections import defaultdict
from typing import List

from psycopg2.extras import RealDictRow

import schemas
from chalicelib.core import events, metadata
from chalicelib.utils import pg_client, helper
from chalicelib.utils import sql_helper as sh

logger = logging.getLogger(__name__)
SIGNIFICANCE_THRSH = 0.4
@@ -238,6 +233,227 @@ def get_stages_and_events(filter_d: schemas.CardSeriesFilterSchema, project_id):
return rows
|
||||
|
||||
|
||||
def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext,
|
||||
metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]:
|
||||
"""
|
||||
Add minimal timestamp
|
||||
:param filter_d: dict contains events&filters&...
|
||||
:return:
|
||||
"""
|
||||
|
||||
stages: List[schemas.SessionSearchEventSchema2] = filter_d.events
|
||||
filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
|
||||
|
||||
stage_constraints = ["main.timestamp <= %(endTimestamp)s"]
|
||||
first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s",
|
||||
"s.start_ts <= %(endTimestamp)s"]
|
||||
if metric_format == schemas.MetricExtendedFormatType.SESSION_COUNT:
|
||||
count_value = '1'
|
||||
else:
|
||||
count_value = 'user_id'
|
||||
first_stage_extra_constraints.append("user_id IS NOT NULL")
|
||||
first_stage_extra_constraints.append("user_id !=''")
|
||||
|
||||
filter_extra_from = []
|
||||
n_stages_query = []
|
||||
values = {}
|
||||
if len(filters) > 0:
|
||||
meta_keys = None
|
||||
for i, f in enumerate(filters):
|
||||
if len(f.value) == 0:
|
||||
continue
|
||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||
|
||||
op = sh.get_sql_operator(f.operator)
|
||||
|
||||
filter_type = f.type
|
||||
f_k = f"f_value{i}"
|
||||
values = {**values,
|
||||
**sh.multi_values(f.value, value_key=f_k)}
|
||||
is_not = False
|
||||
if sh.is_negation_operator(f.operator):
|
||||
is_not = True
|
||||
if filter_type == schemas.FilterType.USER_BROWSER:
|
||||
first_stage_extra_constraints.append(
|
||||
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
|
||||
first_stage_extra_constraints.append(
|
||||
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
|
||||
first_stage_extra_constraints.append(
|
||||
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||
|
||||
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
|
||||
first_stage_extra_constraints.append(
|
||||
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||
elif filter_type == schemas.FilterType.DURATION:
|
||||
if len(f.value) > 0 and f.value[0] is not None:
|
||||
first_stage_extra_constraints.append(f's.duration >= %(minDuration)s')
|
||||
values["minDuration"] = f.value[0]
|
||||
if len(f["value"]) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
|
||||
first_stage_extra_constraints.append('s.duration <= %(maxDuration)s')
|
||||
values["maxDuration"] = f.value[1]
|
||||
            elif filter_type == schemas.FilterType.REFERRER:
                # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)"
                filter_extra_from = [f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"]
                first_stage_extra_constraints.append(
                    sh.multi_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
            elif filter_type == events.EventType.METADATA.ui_type:
                if meta_keys is None:
                    meta_keys = metadata.get(project_id=project.project_id)
                    meta_keys = {m["key"]: m["index"] for m in meta_keys}
                if f.source in meta_keys.keys():
                    first_stage_extra_constraints.append(
                        sh.multi_conditions(
                            f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
                            is_not=is_not, value_key=f_k))
                # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
            elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
                first_stage_extra_constraints.append(
                    sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
            elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
                                 schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
                first_stage_extra_constraints.append(
                    sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
            elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
                first_stage_extra_constraints.append(
                    sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)

    i = -1
    for s in stages:

        if s.operator is None:
            s.operator = schemas.SearchEventOperator.IS

        if not isinstance(s.value, list):
            s.value = [s.value]
        is_any = sh.isAny_opreator(s.operator)
        if not is_any and isinstance(s.value, list) and len(s.value) == 0:
            continue
        i += 1
        if i == 0:
            extra_from = filter_extra_from + ["INNER JOIN public.sessions AS s USING (session_id)"]
        else:
            extra_from = []
        op = sh.get_sql_operator(s.operator)
        # event_type = s["type"].upper()
        event_type = s.type
        if event_type == events.EventType.CLICK.ui_type:
            next_table = events.EventType.CLICK.table
            next_col_name = events.EventType.CLICK.column
        elif event_type == events.EventType.INPUT.ui_type:
            next_table = events.EventType.INPUT.table
            next_col_name = events.EventType.INPUT.column
        elif event_type == events.EventType.LOCATION.ui_type:
            next_table = events.EventType.LOCATION.table
            next_col_name = events.EventType.LOCATION.column
        elif event_type == events.EventType.CUSTOM.ui_type:
            next_table = events.EventType.CUSTOM.table
            next_col_name = events.EventType.CUSTOM.column
        # IOS --------------
        elif event_type == events.EventType.CLICK_MOBILE.ui_type:
            next_table = events.EventType.CLICK_MOBILE.table
            next_col_name = events.EventType.CLICK_MOBILE.column
        elif event_type == events.EventType.INPUT_MOBILE.ui_type:
            next_table = events.EventType.INPUT_MOBILE.table
            next_col_name = events.EventType.INPUT_MOBILE.column
        elif event_type == events.EventType.VIEW_MOBILE.ui_type:
            next_table = events.EventType.VIEW_MOBILE.table
            next_col_name = events.EventType.VIEW_MOBILE.column
        elif event_type == events.EventType.CUSTOM_MOBILE.ui_type:
            next_table = events.EventType.CUSTOM_MOBILE.table
            next_col_name = events.EventType.CUSTOM_MOBILE.column
        else:
            logger.warning(f"=================UNDEFINED:{event_type}")
            continue

        values = {**values, **sh.multi_values(helper.values_for_operator(value=s.value, op=s.operator),
                                              value_key=f"value{i + 1}")}
        if sh.is_negation_operator(s.operator) and i > 0:
            op = sh.reverse_sql_operator(op)
            main_condition = "left_not.session_id ISNULL"
            extra_from.append(f"""LEFT JOIN LATERAL (SELECT session_id
                                                     FROM {next_table} AS s_main
                                                     WHERE {sh.multi_conditions(f"s_main.{next_col_name} {op} %(value{i + 1})s",
                                                                                values=s.value, value_key=f"value{i + 1}")}
                                                       AND s_main.timestamp >= T{i}.stage{i}_timestamp
                                                       AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""")
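            # Negated stages are handled as an anti-join: the LATERAL subquery
            # looks for a matching later event, and main_condition
            # ("left_not.session_id ISNULL") keeps only sessions where none was
            # found. A rough sketch of the generated SQL, with a hypothetical
            # table and value:
            #   LEFT JOIN LATERAL (SELECT session_id
            #                      FROM events.clicks AS s_main
            #                      WHERE s_main.label = 'Buy'
            #                        AND s_main.timestamp >= T1.stage1_timestamp
            #                        AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)
            #   ... WHERE left_not.session_id ISNULL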
        else:
            if is_any:
                main_condition = "TRUE"
            else:
                main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
                                                     values=s.value, value_key=f"value{i + 1}")
        n_stages_query.append(f"""
        (SELECT main.session_id,
                {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp,
                {count_value} AS c
         FROM {next_table} AS main {" ".join(extra_from)}
         WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
               {"AND main.session_id=T1.session_id" if i > 0 else ""}
               AND {main_condition}
               {(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
               {(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
         GROUP BY main.session_id,{count_value})
        AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
        """)
        count_value = '1'
    n_stages = len(n_stages_query)
    if n_stages == 0:
        return []
    n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)

    n_stages_query = f"""
    SELECT {",".join([f"COUNT(T{i + 1}.c) AS stage{i + 1}" for i in range(n_stages)])}
    FROM {n_stages_query};
    """

    params = {"project_id": project.project_id, "startTimestamp": filter_d.startTimestamp,
              "endTimestamp": filter_d.endTimestamp, **values}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(n_stages_query, params)
        logger.debug("---------------------------------------------------")
        logger.debug(query)
        logger.debug("---------------------------------------------------")
        try:
            cur.execute(query)
            row = cur.fetchone()
        except Exception as err:
            logger.warning("--------- SIMPLE FUNNEL SEARCH QUERY EXCEPTION -----------")
            logger.warning(query.decode('UTF-8'))
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(filter_d.model_dump_json())
            logger.warning("--------------------")
            raise err

    stages_list = []
    for i, stage in enumerate(stages):
        count = row[f"stage{i + 1}"]
        drop = None
        if i != 0:
            base_count = row[f"stage{i}"]
            if base_count == 0:
                drop = 0
            elif base_count > 0:
                drop = int(100 * (base_count - count) / base_count)
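                # Worked example with hypothetical numbers: if stage1 counted
                # 200 sessions and stage2 counted 150, the drop for stage2 is
                # int(100 * (200 - 150) / 200) == 25, i.e. a 25% drop-off.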

        stages_list.append(
            {"value": stage.value,
             "type": stage.type,
             "operator": stage.operator,
             "dropPct": drop,
             "count": count})

    return stages_list


def pearson_corr(x: list, y: list):
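    # Presumably computes the sample Pearson correlation coefficient:
    #   r = (n*sum(xy) - sum(x)*sum(y))
    #       / sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))
    # with r in [-1, 1]; the inputs must have equal length, hence the check.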
    n = len(x)
    if n != len(y):
@@ -527,7 +743,7 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
"type": all_issues[issue_id]["issue_type"],
|
||||
"title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
|
||||
"affected_sessions": affected_sessions[issue_id],
|
||||
"unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
|
||||
"unaffected_sessions": session_counts[1] - affected_sessions.get(issue_id, 0),
|
||||
"lost_conversions": lost_conversions,
|
||||
"affected_users": affected_users_dict[issue_id],
|
||||
"conversion_impact": round(r * 100),
|
||||
|
|
@ -544,30 +760,6 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
|
|||
    return n_critical_issues, issues_dict, total_drop_due_to_issues


def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id,
                     metric_format: schemas.MetricExtendedFormatType):
    output = []
    stages = filter_d.events

    if len(stages) == 0:
        logger.debug("no stages found")
        return output, 0

    # The result of the multi-stage query
    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
    # Obtain the first part of the output
    stages_list = get_stages(stages, rows, metric_format=metric_format)
    if len(rows) == 0:
        return stages_list, 0

    # Obtain the second part of the output
    total_drop_due_to_issues = get_issues(stages, rows,
                                          first_stage=1,
                                          last_stage=len(filter_d.events),
                                          drop_only=True)
    return stages_list, total_drop_due_to_issues


def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_stage=None, last_stage=None):
    output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
    stages = filter_d.events

@@ -0,0 +1,276 @@
import logging
from typing import List

from psycopg2.extras import RealDictRow

import schemas
from chalicelib.utils import ch_client
from chalicelib.utils import exp_ch_helper
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
from chalicelib.core import events
from chalicelib.core import metadata  # assumed import path; required by the METADATA filter below

logger = logging.getLogger(__name__)


def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext,
                      metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]:
    stages: List[schemas.SessionSearchEventSchema2] = filter_d.events
    filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
    platform = project.platform
    constraints = ["e.project_id = %(project_id)s",
                   "e.created_at >= toDateTime(%(startTimestamp)s/1000)",
                   "e.created_at <= toDateTime(%(endTimestamp)s/1000)",
                   "e.`$event_name` IN %(eventTypes)s"]

    full_args = {"project_id": project.project_id, "startTimestamp": filter_d.startTimestamp,
                 "endTimestamp": filter_d.endTimestamp}

    MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(timestamp=filter_d.startTimestamp,
                                                            platform=platform)
    MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(filter_d.startTimestamp)

    full_args["MAIN_EVENTS_TABLE"] = MAIN_EVENTS_TABLE
    full_args["MAIN_SESSIONS_TABLE"] = MAIN_SESSIONS_TABLE

    n_stages_query = []
    n_stages_query_not = []
    event_types = []
    values = {}
    has_filters = False
    if len(filters) > 0:
        meta_keys = None
        for i, f in enumerate(filters):
            if len(f.value) == 0:
                continue

            has_filters = True
            f.value = helper.values_for_operator(value=f.value, op=f.operator)

            op = sh.get_sql_operator(f.operator)

            filter_type = f.type
            f_k = f"f_value{i}"
            values = {**values,
                      **sh.multi_values(f.value, value_key=f_k)}
            is_not = False
            if sh.is_negation_operator(f.operator):
                is_not = True

            if filter_type == schemas.FilterType.USER_BROWSER:
                constraints.append(
                    sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type == schemas.FilterType.DURATION:
                if len(f.value) > 0 and f.value[0] is not None:
                    constraints.append('s.duration >= %(minDuration)s')
                    values["minDuration"] = f.value[0]
                if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
                    constraints.append('s.duration <= %(maxDuration)s')
                    values["maxDuration"] = f.value[1]
            elif filter_type == schemas.FilterType.REFERRER:
                constraints.append(
                    sh.multi_conditions(f"s.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
            elif filter_type == events.EventType.METADATA.ui_type:
                if meta_keys is None:
                    meta_keys = metadata.get(project_id=project.project_id)
                    meta_keys = {m["key"]: m["index"] for m in meta_keys}
                if f.source in meta_keys.keys():
                    constraints.append(
                        sh.multi_conditions(
                            f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
                            is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
                                 schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
                constraints.append(
                    sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))

    i = -1
    for s in stages:

        if s.operator is None:
            s.operator = schemas.SearchEventOperator.IS

        if not isinstance(s.value, list):
            s.value = [s.value]
        is_any = sh.isAny_opreator(s.operator)
        if not is_any and isinstance(s.value, list) and len(s.value) == 0:
            continue
        i += 1

        op = sh.get_sql_operator(s.operator)
        is_not = False
        if sh.is_negation_operator(s.operator):
            is_not = True
            op = sh.reverse_sql_operator(op)

        specific_condition = None
        e_k = f"e_value{i}"
        event_type = s.type
        next_event_type = exp_ch_helper.get_event_type(event_type, platform=platform)
        if event_type == events.EventType.CLICK.ui_type:
            if platform == "web":
                next_col_name = events.EventType.CLICK.column
                if not is_any:
                    if schemas.ClickEventExtraOperator.has_value(s.operator):
                        specific_condition = sh.multi_conditions(f"selector {op} %({e_k})s", s.value, value_key=e_k)
            else:
                next_col_name = events.EventType.CLICK_MOBILE.column
        elif event_type == events.EventType.INPUT.ui_type:
            next_col_name = events.EventType.INPUT.column
        elif event_type == events.EventType.LOCATION.ui_type:
            next_col_name = 'url_path'
        elif event_type == events.EventType.CUSTOM.ui_type:
            next_col_name = events.EventType.CUSTOM.column
        # IOS --------------
        elif event_type == events.EventType.CLICK_MOBILE.ui_type:
            next_col_name = events.EventType.CLICK_MOBILE.column
        elif event_type == events.EventType.INPUT_MOBILE.ui_type:
            next_col_name = events.EventType.INPUT_MOBILE.column
        elif event_type == events.EventType.VIEW_MOBILE.ui_type:
            next_col_name = events.EventType.VIEW_MOBILE.column
        elif event_type == events.EventType.CUSTOM_MOBILE.ui_type:
            next_col_name = events.EventType.CUSTOM_MOBILE.column
        else:
            logger.warning(f"=================UNDEFINED:{event_type}")
            continue

        values = {**values, **sh.multi_values(helper.values_for_operator(value=s.value, op=s.operator), value_key=e_k)}

        if next_event_type not in event_types:
            event_types.append(next_event_type)
        full_args[f"event_type_{i}"] = next_event_type
        n_stages_query.append(f"`$event_name`=%(event_type_{i})s")
        if is_not:
            n_stages_query_not.append(n_stages_query[-1] + " AND " +
                                      (sh.multi_conditions(
                                          f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
                                          s.value,
                                          is_not=is_not,
                                          value_key=e_k
                                      ) if not specific_condition else specific_condition))
        elif not is_any:
            n_stages_query[-1] += " AND " + (
                sh.multi_conditions(
                    f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
                    s.value,
                    is_not=is_not,
                    value_key=e_k
                ) if not specific_condition else specific_condition)
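        # For illustration, with a hypothetical property payload: when
        # next_col_name == 'label', the generated predicate behaves like
        #   JSON_VALUE(CAST(`$properties` AS String), '$.label') = 'Buy'
        # i.e. it extracts one scalar field from the event's JSON properties
        # and compares it against the stage value.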

    full_args = {"eventTypes": event_types, **full_args, **values}
    n_stages = len(n_stages_query)
    if n_stages == 0:
        return []

    extra_from = ""
    if has_filters or metric_format == schemas.MetricExtendedFormatType.USER_COUNT:
        extra_from = f"INNER JOIN {MAIN_SESSIONS_TABLE} AS s ON (e.session_id=s.session_id)"
        constraints += ["s.project_id = %(project_id)s",
                        "s.datetime >= toDateTime(%(startTimestamp)s/1000)",
                        "s.datetime <= toDateTime(%(endTimestamp)s/1000)"]

    if metric_format == schemas.MetricExtendedFormatType.SESSION_COUNT:
        group_by = 'e.session_id'
    else:
        constraints.append("isNotNull(s.user_id)")
        group_by = 's.user_id'

    if len(n_stages_query_not) > 0:
        value_conditions_not_base = ["project_id = %(project_id)s",
                                     "created_at >= toDateTime(%(startTimestamp)s/1000)",
                                     "created_at <= toDateTime(%(endTimestamp)s/1000)"]
        _value_conditions_not = []
        value_conditions_not = []
        # deduplicate negated conditions by their rendered form
        for c in n_stages_query_not:
            _p = c % full_args
            if _p not in _value_conditions_not:
                _value_conditions_not.append(_p)
                value_conditions_not.append(c)

        extra_from += f"""LEFT ANTI JOIN (SELECT DISTINCT sub.session_id
                                          FROM {MAIN_EVENTS_TABLE} AS sub
                                          WHERE {' AND '.join(value_conditions_not_base)}
                                            AND ({' OR '.join(value_conditions_not)})
                                         ) AS sub ON(e.session_id=sub.session_id)"""
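        # ClickHouse's LEFT ANTI JOIN keeps only left-side rows with NO match
        # on the right, so any session that emitted one of the negated events
        # is excluded from the funnel entirely, before the sequence matching.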
        del _value_conditions_not
        del value_conditions_not
        del value_conditions_not_base

    sequences = []
    projections = []
    for i, s in enumerate(n_stages_query):
        projections.append(f"coalesce(SUM(T{i + 1}),0) AS stage{i + 1}")
        if i == 0:
            sequences.append(f"anyIf(1,{s}) AS T1")
        else:
            pattern = ""
            conditions = []
            j = 0
            while j <= i:
                pattern += f"(?{j + 1})"
                conditions.append(n_stages_query[j])
                j += 1
            sequences.append(
                f"sequenceMatch('{pattern}')(toDateTime(e.created_at), {','.join(conditions)}) AS T{i + 1}")
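            # For stage 3 this builds sequenceMatch('(?1)(?2)(?3)') over the
            # per-session (or per-user) event stream: T3 is 1 only if events
            # matching conditions 1, 2 and 3 occurred in that chronological
            # order. A hypothetical rendered term for stage 2:
            #   sequenceMatch('(?1)(?2)')(toDateTime(e.created_at),
            #                             `$event_name`=%(event_type_0)s,
            #                             `$event_name`=%(event_type_1)s) AS T2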

    n_stages_query = f"""
    SELECT {",".join(projections)}
    FROM (SELECT {",".join(sequences)}
          FROM {MAIN_EVENTS_TABLE} AS e {extra_from}
          WHERE {" AND ".join(constraints)}
          GROUP BY {group_by}) AS raw;"""
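    # Final shape for illustration (two stages, session count; table name is
    # a placeholder): the inner SELECT computes one T1/T2 flag per session via
    # anyIf/sequenceMatch, and the outer SELECT sums them into stage totals:
    #   SELECT coalesce(SUM(T1),0) AS stage1, coalesce(SUM(T2),0) AS stage2
    #   FROM (SELECT anyIf(1, `$event_name`=%(event_type_0)s) AS T1,
    #                sequenceMatch('(?1)(?2)')(toDateTime(e.created_at), ..., ...) AS T2
    #         FROM experimental.events AS e
    #         WHERE ...
    #         GROUP BY e.session_id) AS raw;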

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=n_stages_query, parameters=full_args)
        logger.debug("---------------------------------------------------")
        logger.debug(query)
        logger.debug("---------------------------------------------------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- SIMPLE FUNNEL SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(filter_d.model_dump_json())
            logger.warning("--------------------")
            raise err

    stages_list = []
    # the aggregate query always returns a single row of per-stage totals
    row = rows[0]
    for i, stage in enumerate(stages):
        count = row[f"stage{i + 1}"]
        drop = None
        if i != 0:
            base_count = row[f"stage{i}"]
            if base_count == 0:
                drop = 0
            elif base_count > 0:
                drop = int(100 * (base_count - count) / base_count)

        stages_list.append(
            {"value": stage.value,
             "type": stage.type,
             "operator": stage.operator,
             "dropPct": drop,
             "count": count})

    return stages_list