Compare commits
v1.21.0-ba...main (794 commits)
2885 changed files with 117769 additions and 73227 deletions
.github/workflows/api-ee.yaml (2 changes)

@@ -10,8 +10,6 @@ on:
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
-      - actions_test
     paths:
       - "ee/api/**"
       - "api/**"
.github/workflows/api.yaml (1 change)

@@ -10,7 +10,6 @@ on:
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
     paths:
       - "api/**"
       - "!api/.gitignore"
.github/workflows/assist-ee.yaml (1 change)

@@ -9,7 +9,6 @@ on:
   push:
     branches:
       - dev
-      - api-*
     paths:
       - "ee/assist/**"
       - "assist/**"
"Build and Deploy Peers EE" workflow (renamed to "Build and Deploy Assist-Server EE" in this comparison):

@@ -1,4 +1,4 @@
-# This action will push the peers changes to aws
+# This action will push the assist changes to aws
 on:
   workflow_dispatch:
     inputs:

@@ -9,14 +9,10 @@ on:
   push:
     branches:
       - dev
-      - api-*
     paths:
-      - "ee/peers/**"
-      - "peers/**"
-      - "!peers/.gitignore"
-      - "!peers/*-dev.sh"
+      - "ee/assist-server/**"

-name: Build and Deploy Peers EE
+name: Build and Deploy Assist-Server EE

 jobs:
   deploy:

@@ -57,12 +53,7 @@ jobs:
           kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
         id: setcontext

-      # Caching docker images
-      - uses: satackey/action-docker-layer-caching@v0.0.11
-        # Ignore the failure of a step and avoid terminating the job.
-        continue-on-error: true
-
-      - name: Building and Pushing peers image
+      - name: Building and Pushing Assist-Server image
         id: build-image
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

@@ -70,11 +61,11 @@ jobs:
           ENVIRONMENT: staging
         run: |
           skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-          cd peers
+          cd assist-server
           PUSH_IMAGE=0 bash -x ./build.sh ee
           [[ "x$skip_security_checks" == "xtrue" ]] || {
             curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
-            images=("peers")
+            images=("assist-server")
             for image in ${images[*]};do
               ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
             done

@@ -85,7 +76,7 @@ jobs:
           } && {
             echo "Skipping Security Checks"
           }
-          images=("peers")
+          images=("assist-server")
           for image in ${images[*]};do
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done

@@ -109,43 +100,23 @@ jobs:
               tag: `echo ${image_array[1]} | cut -d '-' -f 1`
           EOF
           done

       - name: Deploy to kubernetes
         run: |
+          pwd
           cd scripts/helmcharts/

           # Update changed image tag
-          sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+          sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
           mkdir -p /tmp/charts
-          mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
+          mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
           rm -rf openreplay/charts/*
           mv /tmp/charts/* openreplay/charts/
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+          # We're not passing -ee flag, because helm will add that.
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

-      - name: Alert slack
-        if: ${{ failure() }}
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_CHANNEL: ee
-          SLACK_TITLE: "Failed ${{ github.workflow }}"
-          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
-          SLACK_USERNAME: "OR Bot"
-          SLACK_MESSAGE: "Build failed :bomb:"
-
-      # - name: Debug Job
-      #   # if: ${{ failure() }}
-      #   uses: mxschmitt/action-tmate@v3
-      #   env:
-      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}-ee
-      #     ENVIRONMENT: staging
-      #   with:
-      #     iimit-access-to-actor: true
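The sed call above is terse: the address selects the line matching the service name in the override file, each n then advances one line, and s/.*/.../ rewrites the third line below the match with the new tag. A minimal sketch of that effect, assuming a hypothetical override-file layout (the real /tmp/image_override.yaml structure is not shown in this diff):

  cat > /tmp/image_override.yaml <<'EOF'
  assist-server:
    image:
      repository: example.registry/service-image
      tag: old
  EOF
  IMAGE_TAG=main_abc123
  # match "assist-server", skip three lines (image:, repository:), rewrite the tag line
  sed -i "/assist-server/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
  cat /tmp/image_override.yaml   # tag is now main_abc123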
.github/workflows/assist-stats.yaml (5 changes)

@@ -15,7 +15,7 @@ on:
       - "!assist-stats/*-dev.sh"
       - "!assist-stats/requirements-*.txt"

-name: Build and Deploy Assist Stats
+name: Build and Deploy Assist Stats ee

 jobs:
   deploy:

@@ -123,8 +123,9 @@ jobs:
               tag: ${IMAGE_TAG}
           EOF

+          export IMAGE_TAG=${IMAGE_TAG}
           # Update changed image tag
-          sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+          yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml

           cat /tmp/image_override.yaml
           # Deploy command
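The switch from sed to yq makes the tag update structural rather than positional. A small sketch of the new approach, assuming the mikefarah yq v4 binary these workflows install and an illustrative override file:

  export IMAGE_TAG=main_abc123
  cat > /tmp/image_override.yaml <<'EOF'
  utilities:
    apiCrons:
      assiststats:
        image:
          tag: old
  EOF
  # strenv() reads the exported IMAGE_TAG; -i edits the file in place
  yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
  yq '.utilities.apiCrons.assiststats.image.tag' /tmp/image_override.yaml   # prints main_abc123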
.github/workflows/assist.yaml (1 change)

@@ -9,7 +9,6 @@ on:
   push:
     branches:
       - dev
-      - api-*
     paths:
       - "assist/**"
       - "!assist/.gitignore"
.github/workflows/crons-ee.yaml (42 changes)

@@ -10,7 +10,6 @@ on:
     branches:
       - dev
       - api-*
-      - v1.11.0-patch
     paths:
       - "ee/api/**"
       - "api/**"

@@ -101,33 +100,32 @@ jobs:
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
           done
       - name: Creating old image input
+        env:
+          # We're not passing -ee flag, because helm will add that.
+          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         run: |
-          #
-          # Create yaml with existing image tags
-          #
-          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
-          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
-
-          echo > /tmp/image_override.yaml
-
-          for line in `cat /tmp/image_tag.txt`;
-          do
-            image_array=($(echo "$line" | tr ':' '\n'))
-            cat <<EOF >> /tmp/image_override.yaml
-          ${image_array[0]}:
-            image:
-              # We've to strip off the -ee, as helm will append it.
-              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
+          cd scripts/helmcharts/
+          cat <<EOF>/tmp/image_override.yaml
+          image: &image
+            tag: "${IMAGE_TAG}"
+          utilities:
+            apiCrons:
+              assiststats:
+                image: *image
+              report:
+                image: *image
+              sessionsCleaner:
+                image: *image
+              projectsStats:
+                image: *image
+              fixProjectsStats:
+                image: *image
           EOF
-          done

       - name: Deploy to kubernetes
         run: |
           cd scripts/helmcharts/

-          # Update changed image tag
-          sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
-
           cat /tmp/image_override.yaml
           # Deploy command
           mkdir -p /tmp/charts

@@ -137,8 +135,6 @@ jobs:
           helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
         env:
           DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-          # We're not passing -ee flag, because helm will add that.
-          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           ENVIRONMENT: staging

       - name: Alert slack
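The generated override file leans on a YAML anchor (&image) and aliases (*image) so one tag value fans out to every cron image. A minimal sketch of how that expands, with a placeholder tag and only two of the entries, assuming the same yq binary the workflow already installs:

  IMAGE_TAG=main_abc123
  cat <<EOF >/tmp/image_override.yaml
  image: &image
    tag: "${IMAGE_TAG}"
  utilities:
    apiCrons:
      assiststats:
        image: *image
      report:
        image: *image
  EOF
  # explode(.) resolves every alias, showing the tag copied into each image block
  yq 'explode(.)' /tmp/image_override.yaml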
.github/workflows/patch-build-old.yaml (new file, 189 lines)

# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma separated names of services to build(in small letters).'
        required: true
        default: 'chalice,frontend'
      tag:
        description: 'Tag to update.'
        required: true
        type: string
      branch:
        description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
        required: true
        type: string

name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag

jobs:
  deploy:
    name: Build Patch from old tag
    runs-on: ubuntu-latest
    env:
      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 4
          ref: ${{ github.event.inputs.tag }}

      - name: Set Remote with GITHUB_TOKEN
        run: |
          git config --unset http.https://github.com/.extraheader
          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

      - name: Create backup tag with timestamp
        run: |
          set -e # Exit immediately if a command exits with a non-zero status
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
          echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
          echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
          git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
          git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
          echo "Created backup tag: $BACKUP_TAG"

          # Get the oldest commit date from the last 3 commits in raw format
          OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
          echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
          # Add 1 second to the timestamp
          NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
          echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV

      - name: Setup yq
        uses: mikefarah/yq@master

      # Configure AWS credentials for the first registry
      - name: Configure AWS credentials for RELEASE_ARM_REGISTRY
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
          aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
          aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}

      - name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
        id: login-ecr-arm
        run: |
          aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
          aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

      - uses: depot/setup-action@v1
      - name: Get HEAD Commit ID
        run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Define Branch Name
        run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV

      - name: Build
        id: build-image
        env:
          DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
          DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
          MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
          MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
          MSAAS_REPO_FOLDER: /tmp/msaas
        run: |
          set -exo pipefail
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git checkout -b $BRANCH_NAME
          working_dir=$(pwd)
          function image_version(){
            local service=$1
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
            current_version=$(yq eval '.AppVersion' $chart_path)
            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
            echo $new_version
            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
          }
          function clone_msaas() {
            [ -d $MSAAS_REPO_FOLDER ] || {
              git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
              cd $MSAAS_REPO_FOLDER
              cd openreplay && git fetch origin && git checkout $INPUT_TAG
              git log -1
              cd $MSAAS_REPO_FOLDER
              bash git-init.sh
              git checkout
            }
          }
          function build_managed() {
            local service=$1
            local version=$2
            echo building managed
            clone_msaas
            if [[ $service == 'chalice' ]]; then
              cd $MSAAS_REPO_FOLDER/openreplay/api
            else
              cd $MSAAS_REPO_FOLDER/openreplay/$service
            fi
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
          }
          # Checking for backend images
          ls backend/cmd >> /tmp/backend.txt
          echo Services: "${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          BUILD_SCRIPT_NAME="build.sh"
          # Build FOSS
          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            version=$(image_version $SERVICE)
            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            else
              build_managed $SERVICE $version
            fi
            cd $working_dir
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
            yq eval ".AppVersion = \"$version\"" -i $chart_path
            git add $chart_path
            git commit -m "Increment $SERVICE chart version"
          done

      - name: Change commit timestamp
        run: |
          # Convert the timestamp to a date format git can understand
          NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
          echo "Setting commit date to: $NEW_DATE"

          # Amend the commit with the new date
          GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"

          # Verify the change
          git log -1 --pretty=format:"Commit now dated: %cD"

          # git tag and push
          git tag $INPUT_TAG -f
          git push origin $INPUT_TAG -f

      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true
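Two of the one-liners in this workflow are easy to misread. The awk call bumps the last component of a chart's AppVersion, and the perl call turns the saved Unix timestamp into a date string git accepts. A quick sketch of both with sample values only:

  echo "1.21.0" | awk -F. '{$NF += 1 ; print $1"."$2"."$3}'          # -> 1.21.1
  NEW_TIMESTAMP=1700000001
  perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP   # -> Tue Nov 14 22:13:21 2023 +0000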
.github/workflows/patch-build.yaml (246 changes)

@@ -2,7 +2,6 @@

 on:
   workflow_dispatch:
-    description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
     inputs:
       services:
         description: 'Comma separated names of services to build(in small letters).'

@@ -20,12 +19,20 @@ jobs:
       DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
-          fetch-depth: 1
+          fetch-depth: 0
+          token: ${{ secrets.GITHUB_TOKEN }}
       - name: Rebase with main branch, to make sure the code has latest main changes
+        if: github.ref != 'refs/heads/main'
         run: |
-          git pull --rebase origin main
+          git remote -v
+          git config --global user.email "action@github.com"
+          git config --global user.name "GitHub Action"
+          git config --global rebase.autoStash true
+          git fetch origin main:main
+          git rebase main
+          git log -3

       - name: Downloading yq
         run: |

@@ -48,6 +55,8 @@ jobs:
           aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

       - uses: depot/setup-action@v1
+        env:
+          DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
       - name: Get HEAD Commit ID
         run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
       - name: Define Branch Name

@@ -65,78 +74,168 @@ jobs:
           MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
           MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
           MSAAS_REPO_FOLDER: /tmp/msaas
+          SERVICES_INPUT: ${{ github.event.inputs.services }}
         run: |
-          set -exo pipefail
-          git config --local user.email "action@github.com"
-          git config --local user.name "GitHub Action"
-          git checkout -b $BRANCH_NAME
-          working_dir=$(pwd)
-          function image_version(){
-            local service=$1
-            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-            current_version=$(yq eval '.AppVersion' $chart_path)
-            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
-            echo $new_version
-            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
-          }
-          function clone_msaas() {
-            [ -d $MSAAS_REPO_FOLDER ] || {
-              git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
-              cd $MSAAS_REPO_FOLDER
-              cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
-              git log -1
-              cd $MSAAS_REPO_FOLDER
-              bash git-init.sh
-              git checkout
-            }
-          }
-          function build_managed() {
-            local service=$1
-            local version=$2
-            echo building managed
-            clone_msaas
-            if [[ $service == 'chalice' ]]; then
-              cd $MSAAS_REPO_FOLDER/openreplay/api
-            else
-              cd $MSAAS_REPO_FOLDER/openreplay/$service
-            fi
-            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
-          }
-          # Checking for backend images
-          ls backend/cmd >> /tmp/backend.txt
-          echo Services: "${{ github.event.inputs.services }}"
-          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
-          BUILD_SCRIPT_NAME="build.sh"
-          # Build FOSS
-          for SERVICE in "${SERVICES[@]}"; do
-            # Check if service is backend
-            if grep -q $SERVICE /tmp/backend.txt; then
-              cd backend
-              foss_build_args="nil $SERVICE"
-              ee_build_args="ee $SERVICE"
-            else
-              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
-              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
-              ee_build_args="ee"
-            fi
-            version=$(image_version $SERVICE)
-            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
-              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-            else
-              build_managed $SERVICE $version
-            fi
-            cd $working_dir
-            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
-            yq eval ".AppVersion = \"$version\"" -i $chart_path
-            git add $chart_path
-            git commit -m "Increment $SERVICE chart version"
-            git push --set-upstream origin $BRANCH_NAME
-          done
+          #!/bin/bash
+          set -euo pipefail
+
+          # Configuration
+          readonly WORKING_DIR=$(pwd)
+          readonly BUILD_SCRIPT_NAME="build.sh"
+          readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
+
+          # Initialize git configuration
+          setup_git() {
+            git config --local user.email "action@github.com"
+            git config --local user.name "GitHub Action"
+            git checkout -b "$BRANCH_NAME"
+          }
+
+          # Get and increment image version
+          image_version() {
+            local service=$1
+            local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+            local current_version new_version
+
+            current_version=$(yq eval '.AppVersion' "$chart_path")
+            new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
+            echo "$new_version"
+          }
+
+          # Clone MSAAS repository if not exists
+          clone_msaas() {
+            if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
+              git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
+              cd "$MSAAS_REPO_FOLDER"
+              cd openreplay && git fetch origin && git checkout main
+              git log -1
+              cd "$MSAAS_REPO_FOLDER"
+              bash git-init.sh
+              git checkout
+            fi
+          }
+
+          # Build managed services
+          build_managed() {
+            local service=$1
+            local version=$2
+
+            echo "Building managed service: $service"
+            clone_msaas
+
+            if [[ $service == 'chalice' ]]; then
+              cd "$MSAAS_REPO_FOLDER/openreplay/api"
+            else
+              cd "$MSAAS_REPO_FOLDER/openreplay/$service"
+            fi
+
+            local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
+
+            echo "Executing: $build_cmd"
+            if ! eval "$build_cmd" 2>&1; then
+              echo "Build failed for $service"
+              exit 1
+            fi
+          }
+
+          # Build service with given arguments
+          build_service() {
+            local service=$1
+            local version=$2
+            local build_args=$3
+            local build_script=${4:-$BUILD_SCRIPT_NAME}
+
+            local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
+            echo "Executing: $command"
+            eval "$command"
+          }
+
+          # Update chart version and commit changes
+          update_chart_version() {
+            local service=$1
+            local version=$2
+            local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+
+            # Ensure we're in the original working directory/repository
+            cd "$WORKING_DIR"
+            yq eval ".AppVersion = \"$version\"" -i "$chart_path"
+            git add "$chart_path"
+            git commit -m "Increment $service chart version to $version"
+            git push --set-upstream origin "$BRANCH_NAME"
+            cd -
+          }
+
+          # Main execution
+          main() {
+            setup_git
+
+            # Get backend services list
+            ls backend/cmd >"$BACKEND_SERVICES_FILE"
+
+            # Parse services input (fix for GitHub Actions syntax)
+            echo "Services: ${SERVICES_INPUT:-$1}"
+            IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
+
+            # Process each service
+            for service in "${services[@]}"; do
+              echo "Processing service: $service"
+              cd "$WORKING_DIR"
+
+              local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
+
+              # Determine build configuration based on service type
+              if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
+                # Backend service
+                cd backend
+                foss_build_args="nil $service"
+                ee_build_args="ee $service"
+              else
+                # Non-backend service
+                case "$service" in
+                  chalice | alerts | crons)
+                    cd "$WORKING_DIR/api"
+                    ;;
+                  *)
+                    cd "$service"
+                    ;;
+                esac
+
+                # Special build scripts for alerts/crons
+                if [[ $service == 'alerts' || $service == 'crons' ]]; then
+                  build_script="build_${service}.sh"
+                fi
+
+                ee_build_args="ee"
+              fi
+
+              # Get version and build
+              local version
+              version=$(image_version "$service")
+
+              # Build FOSS and EE versions
+              build_service "$service" "$version" "$foss_build_args"
+              build_service "$service" "${version}-ee" "$ee_build_args"
+
+              # Build managed version for specific services
+              if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
+                echo "Nothing to build in managed for service $service"
+              else
+                build_managed "$service" "$version"
+              fi
+
+              # Update chart and commit
+              update_chart_version "$service" "$version"
+            done
+            cd "$WORKING_DIR"
+
+            # Cleanup
+            rm -f "$BACKEND_SERVICES_FILE"
+          }
+
+          echo "Working directory: $WORKING_DIR"
+          # Run main function with all arguments
+          main "$SERVICES_INPUT"

       - name: Create Pull Request
         uses: repo-sync/pull-request@v2

@@ -147,8 +246,7 @@ jobs:
           pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
           pr_body: |
             This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
-            Once this PR is merged, To update the latest tag, run the following workflow.
-            https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
+            Once this PR is merged, tag update job will run automatically.

       # - name: Debug Job
       #   if: ${{ failure() }}
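The refactored script classifies each requested service before choosing a build directory and script. A standalone sketch of that routing logic that can be run outside the workflow; the backend list and service names below are placeholders, and this only mirrors, not replaces, the workflow's behaviour:

  #!/bin/bash
  # Backend services come from `ls backend/cmd` in the real workflow; use a stub list here.
  printf '%s\n' http ender sink > /tmp/backend.txt
  classify() {
    local service=$1
    if grep -q "$service" /tmp/backend.txt; then
      echo "backend/"                 # built via backend/build.sh
    else
      case "$service" in
        chalice | alerts | crons) echo "api/" ;;   # these build from the api folder
        *) echo "$service/" ;;                      # everything else builds from its own folder
      esac
    fi
  }
  for s in chalice sink frontend; do
    echo "$s -> $(classify "$s")"     # chalice -> api/, sink -> backend/, frontend -> frontend/
  done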
.github/workflows/peers.yaml (deleted, 149 lines)

# This action will push the peers changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
        required: false
        default: "false"
  push:
    branches:
      - dev
      - api-*
    paths:
      - "peers/**"
      - "!peers/.gitignore"
      - "!peers/*-dev.sh"

name: Build and Deploy Peers

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - uses: ./.github/composite-actions/update-keys
        with:
          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
          assist_key: ${{ secrets.ASSIST_KEY }}
          domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
          jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
          license_key: ${{ secrets.OSS_LICENSE_KEY }}
          minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
          minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
          pg_password: ${{ secrets.OSS_PG_PASSWORD }}
          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
        name: Update Keys

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      # Caching docker images
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true

      - name: Building and Pushing peers image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd peers
          PUSH_IMAGE=0 bash -x ./build.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("peers")
            for image in ${images[*]};do
              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("peers")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              tag: ${image_array[1]}
          EOF
          done

      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          # Update changed image tag
          sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mkdir -p /tmp/charts
          mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
          rm -rf openreplay/charts/*
          mv /tmp/charts/* openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      - name: Alert slack
        if: ${{ failure() }}
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: foss
          SLACK_TITLE: "Failed ${{ github.workflow }}"
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
          SLACK_USERNAME: "OR Bot"
          SLACK_MESSAGE: "Build failed :bomb:"

      # - name: Debug Job
      #   # if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #     IMAGE_TAG: ${{ github.sha }}-ee
      #     ENVIRONMENT: staging
      #   with:
      #     limit-access-to-actor: true
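The deleted workflow's "Creating old image input" step used a dense pipeline to turn running pod images into a tag list. A sketch of what each stage does, using made-up image names in place of the jsonpath output:

  # jsonpath returns one space-separated string of pod images; the pipeline splits it into
  # lines, de-duplicates with a count, keeps only /foss/ images, and strips everything up to
  # the final "name:tag" component.
  echo 'registry.example.com/foss/chalice:v1.2.3 registry.example.com/foss/frontend:v1.2.3' |
    tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3
  # -> chalice:v1.2.3
  #    frontend:v1.2.3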
.github/workflows/release-deployment.yaml (vendored, new file, 103 lines)

@@ -0,0 +1,103 @@
name: Release Deployment

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
        required: true
      branch:
        description: 'Branch to deploy (defaults to dev)'
        required: false
        default: 'dev'

env:
  IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
  DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
  DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.branch }}
      - name: Docker login
        run: |
          docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - name: Set image tag with branch info
        run: |
          SHORT_SHA=$(git rev-parse --short HEAD)
          echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
          echo "Using image tag: $IMAGE_TAG"

      - uses: depot/setup-action@v1

      - name: Build and push Docker images
        run: |
          # Parse the comma-separated services list into an array
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          working_dir=$(pwd)

          # Define backend services (consider moving this to workflow inputs or repo config)
          ls backend/cmd >> /tmp/backend.txt
          BUILD_SCRIPT_NAME="build.sh"

          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd $working_dir/backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              cd $working_dir
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            {
              echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            }&
            {
              echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
              IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            }&
          done
          wait

      - uses: azure/k8s-set-context@v1
        name: Using ee release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}

      - name: Deploy to ee release Kubernetes
        run: |
          echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
          done

      - uses: azure/k8s-set-context@v1
        name: Using foss release cluster
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}

      - name: Deploy to FOSS release Kubernetes
        run: |
          echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          for SERVICE in "${SERVICES[@]}"; do
            SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
            echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
            echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
            kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
          done
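As a reading aid for the build step above, this hedged Python sketch mirrors how each requested service is classified before its build script is chosen; the service names are hypothetical and the authoritative logic is the shell in the workflow.

# Illustrative sketch only: mirrors the shell classification in the
# "Build and push Docker images" step above. Service names are examples.
backend_services = {"sink", "ender", "assets"}   # hypothetical contents of backend/cmd

def build_plan(service: str) -> dict:
    if service in backend_services:
        return {"cwd": "backend", "script": "build.sh",
                "foss_args": f"nil {service}", "ee_args": f"ee {service}"}
    if service in {"chalice", "alerts", "crons"}:
        script = f"build_{service}.sh" if service in {"alerts", "crons"} else "build.sh"
        return {"cwd": "api", "script": script, "foss_args": "", "ee_args": "ee"}
    return {"cwd": service, "script": "build.sh", "foss_args": "", "ee_args": "ee"}

print(build_plan("alerts"))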
.github/workflows/sourcemaps-reader-ee.yaml (vendored, 10 lines changed)

@@ -1,4 +1,4 @@
-# This action will push the sourcemapreader changes to aws
+# This action will push the sourcemapreader changes to ee
 on:
 workflow_dispatch:
 inputs:
@@ -9,13 +9,13 @@ on:
 push:
 branches:
 - dev
-- api-*
 paths:
+- "ee/sourcemap-reader/**"
 - "sourcemap-reader/**"
 - "!sourcemap-reader/.gitignore"
 - "!sourcemap-reader/*-dev.sh"

-name: Build and Deploy sourcemap-reader
+name: Build and Deploy sourcemap-reader EE

 jobs:
 deploy:
@@ -64,7 +64,7 @@ jobs:
 - name: Building and Pushing sourcemaps-reader image
 id: build-image
 env:
-DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
 IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
 ENVIRONMENT: staging
 run: |
@@ -132,7 +132,7 @@ jobs:
 if: ${{ failure() }}
 uses: rtCamp/action-slack-notify@v2
 env:
-SLACK_CHANNEL: foss
+SLACK_CHANNEL: ee
 SLACK_TITLE: "Failed ${{ github.workflow }}"
 SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
 SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
.github/workflows/sourcemaps-reader.yaml (vendored, 1 line changed)

@@ -9,7 +9,6 @@ on:
 push:
 branches:
 - dev
-- api-*
 paths:
 - "sourcemap-reader/**"
 - "!sourcemap-reader/.gitignore"
.github/workflows/update-tag.yaml (vendored, 47 lines changed)

@@ -1,35 +1,42 @@
 on:
-workflow_dispatch:
+pull_request:
-description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
+types: [closed]
-inputs:
+branches:
-services:
+- main
-description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
+name: Release tag update --force
-required: true
-default: "false"

-name: Force Push tag with main branch HEAD

 jobs:
 deploy:
 name: Build Patch from main
 runs-on: ubuntu-latest
-env:
+if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
-DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
-DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
 steps:
 - name: Checkout
 uses: actions/checkout@v2

+- name: Get latest release tag using GitHub API
+  id: get-latest-tag
+  run: |
+    LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+      "https://api.github.com/repos/${{ github.repository }}/releases/latest" \
+      | jq -r .tag_name)
+
+    # Fallback to git command if API doesn't return a tag
+    if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
+      echo "Not found latest tag"
+      exit 100
+    fi
+
+    echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
+    echo "Latest tag: $LATEST_TAG"

 - name: Set Remote with GITHUB_TOKEN
 run: |
 git config --unset http.https://github.com/.extraheader
-git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
+git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}

 - name: Push main branch to tag
 run: |
-git fetch --tags
 git checkout main
-git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
+echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
-# - name: Debug Job
+git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
-# if: ${{ failure() }}
-# uses: mxschmitt/action-tmate@v3
-# with:
-# limit-access-to-actor: true
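The new tag-resolution step above boils down to one API call plus a hard failure when no release exists; a hedged Python equivalent is sketched below, using only the public GitHub releases endpoint shown in the workflow (the token and repository values are placeholders supplied by the workflow environment).

# Illustrative Python equivalent of the curl/jq step above.
import os
import sys
import requests

repo = os.environ.get("GITHUB_REPOSITORY", "openreplay/openreplay")
headers = {"Authorization": f"token {os.environ.get('GITHUB_TOKEN', '')}"}
resp = requests.get(f"https://api.github.com/repos/{repo}/releases/latest",
                    headers=headers, timeout=10)
latest_tag = resp.json().get("tag_name") if resp.ok else None

if not latest_tag:
    print("Not found latest tag")
    sys.exit(100)
print(f"Latest tag: {latest_tag}")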
LICENSE (2 lines changed)

@@ -1,4 +1,4 @@
-Copyright (c) 2021-2024 Asayer, Inc dba OpenReplay
+Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay

 OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
 - All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
@@ -1,10 +1,17 @@
-FROM python:3.11-alpine
+FROM python:3.12-alpine AS builder
-LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
+LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
-LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
+LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
-ARG GIT_SHA
-LABEL GIT_SHA=$GIT_SHA

-RUN apk add --no-cache build-base tini
+RUN apk add --no-cache build-base
+WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade uv && \
+    export UV_SYSTEM_PYTHON=true && \
+    uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
+    uv pip install --no-cache-dir --upgrade -r requirements.txt
+
+FROM python:3.12-alpine
+ARG GIT_SHA
 ARG envarg
 # Add Tini
 # Startup daemon
@@ -14,19 +21,11 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
 PRIVATE_ENDPOINTS=false \
 ENTERPRISE_BUILD=${envarg} \
 GIT_SHA=$GIT_SHA
+COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
+COPY --from=builder /usr/local/bin /usr/local/bin
 WORKDIR /work
-COPY requirements.txt ./requirements.txt
-RUN pip install --no-cache-dir --upgrade uv
-RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
-RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system

 COPY . .
-RUN mv env.default .env
+RUN apk add --no-cache tini && mv env.default .env

-RUN adduser -u 1001 openreplay -D
-USER 1001

 ENTRYPOINT ["/sbin/tini", "--"]
-CMD ./entrypoint.sh
+CMD ["./entrypoint.sh"]
@@ -1,4 +1,4 @@
-FROM python:3.11-alpine
+FROM python:3.12-alpine
 LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
 LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
 ARG GIT_SHA
api/Pipfile (25 lines changed)

@@ -4,23 +4,26 @@ verify_ssl = true
 name = "pypi"

 [packages]
-urllib3 = "==1.26.16"
+urllib3 = "==2.3.0"
 requests = "==2.32.3"
-boto3 = "==1.35.60"
+boto3 = "==1.36.12"
-pyjwt = "==2.9.0"
+pyjwt = "==2.10.1"
 psycopg2-binary = "==2.9.10"
-psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
+psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
-elasticsearch = "==8.16.0"
+clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
+clickhouse-connect = "==0.8.15"
+elasticsearch = "==8.17.1"
 jira = "==3.8.0"
-cachetools = "==5.5.0"
+cachetools = "==5.5.1"
-fastapi = "==0.115.5"
+fastapi = "==0.115.8"
-uvicorn = {extras = ["standard"], version = "==0.32.0"}
+uvicorn = {extras = ["standard"], version = "==0.34.0"}
 python-decouple = "==3.8"
-pydantic = {extras = ["email"], version = "==2.9.2"}
+pydantic = {extras = ["email"], version = "==2.10.6"}
-apscheduler = "==3.10.4"
+apscheduler = "==3.11.0"
-redis = "==5.2.0"
+redis = "==5.2.1"

 [dev-packages]

 [requires]
 python_version = "3.12"
+python_full_version = "3.12.8"
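Since the Pipfile above now pins clickhouse-driver and clickhouse-connect, here is a minimal hedged sketch of the clickhouse-connect client; the connection details are placeholders, and the API itself goes through the repo's ch_client wrapper rather than this snippet.

# Minimal sketch of clickhouse-connect usage, assuming a reachable ClickHouse
# instance; connection details below are placeholders, not repo configuration.
import clickhouse_connect

client = clickhouse_connect.get_client(host="localhost", port=8123,
                                       username="default", password="")
result = client.query("SELECT 1 AS ok")
print(result.result_rows)  # [(1,)]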
api/app.py (10 lines changed)

@@ -13,17 +13,16 @@ from psycopg.rows import dict_row
 from starlette.responses import StreamingResponse

 from chalicelib.utils import helper
-from chalicelib.utils import pg_client
+from chalicelib.utils import pg_client, ch_client
 from crons import core_crons, core_dynamic_crons
 from routers import core, core_dynamic
-from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
+from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics

 loglevel = config("LOGLEVEL", default=logging.WARNING)
 print(f">Loglevel set to: {loglevel}")
 logging.basicConfig(level=loglevel)


 class ORPYAsyncConnection(AsyncConnection):

     def __init__(self, *args, **kwargs):
@@ -39,6 +38,7 @@ async def lifespan(app: FastAPI):

     app.schedule = AsyncIOScheduler()
     await pg_client.init()
+    await ch_client.init()
     app.schedule.start()

     for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
@@ -128,3 +128,7 @@ app.include_router(usability_tests.app_apikey)
 app.include_router(spot.public_app)
 app.include_router(spot.app)
 app.include_router(spot.app_apikey)

+app.include_router(product_anaytics.public_app)
+app.include_router(product_anaytics.app)
+app.include_router(product_anaytics.app_apikey)
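To summarise the lifespan change above, the sketch below shows the pattern in isolation: both database clients are initialised before the app serves traffic. It is a minimal hedged sketch; the pg_client.terminate call mirrors the alerts app shutdown shown later in this diff, and no ch_client shutdown is shown here because this hunk does not include one.

# Minimal sketch of the FastAPI lifespan pattern used above.
from contextlib import asynccontextmanager
from fastapi import FastAPI
from chalicelib.utils import pg_client, ch_client  # repo modules, as imported above

@asynccontextmanager
async def lifespan(app: FastAPI):
    await pg_client.init()   # PostgreSQL pool, as before
    await ch_client.init()   # ClickHouse client, newly added in this change
    yield
    await pg_client.terminate()  # mirrors the alerts app shutdown further down

app = FastAPI(lifespan=lifespan)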
@@ -5,14 +5,14 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
 from fastapi import FastAPI

-from chalicelib.core import alerts_processor
+from chalicelib.core.alerts import alerts_processor
 from chalicelib.utils import pg_client


 @asynccontextmanager
 async def lifespan(app: FastAPI):
     # Startup
-    logging.info(">>>>> starting up <<<<<")
+    ap_logger.info(">>>>> starting up <<<<<")
     await pg_client.init()
     app.schedule.start()
     app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
@@ -27,14 +27,22 @@ async def lifespan(app: FastAPI):
     yield

     # Shutdown
-    logging.info(">>>>> shutting down <<<<<")
+    ap_logger.info(">>>>> shutting down <<<<<")
     app.schedule.shutdown(wait=False)
     await pg_client.terminate()


+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+ap_logger = logging.getLogger('apscheduler')
+ap_logger.setLevel(loglevel)
+
 app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
               redoc_url=config("redoc_url", default=""), lifespan=lifespan)
-logging.info("============= ALERTS =============")
+app.schedule = AsyncIOScheduler()
+ap_logger.info("============= ALERTS =============")


 @app.get("/")
@@ -50,17 +58,8 @@ async def get_health_status():
     }}

-app.schedule = AsyncIOScheduler()
-
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-app.schedule = AsyncIOScheduler()

 if config("LOCAL_DEV", default=False, cast=bool):
     @app.get('/trigger', tags=["private"])
     async def trigger_main_cron():
-        logging.info("Triggering main cron")
+        ap_logger.info("Triggering main cron")
         alerts_processor.process()
@@ -45,8 +45,6 @@ class JWTAuth(HTTPBearer):
             raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                 detail="Invalid authentication scheme.")
         jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials)
-        logger.info("------ jwt_payload ------")
-        logger.info(jwt_payload)
         auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1),
                                                                     jwt_iat=jwt_payload.get("iat", 100))
         if jwt_payload is None \
@@ -120,8 +118,7 @@ class JWTAuth(HTTPBearer):
             jwt_payload = None
         else:
             jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["spotRefreshToken"])
-            logger.info("__process_spot_refresh_call")
-            logger.info(jwt_payload)
             if jwt_payload is None or jwt_payload.get("jti") is None:
                 logger.warning("Null spotRefreshToken's payload, or null JTI.")
                 raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
api/chalicelib/core/alerts/__init__.py (new file, 10 lines)

@@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_ALERTS", cast=bool, default=False):
    logging.info(">>> Using experimental alerts")
    from . import alerts_processor_ch as alerts_processor
else:
    from . import alerts_processor as alerts_processor
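The net effect of this new __init__.py is that callers import a single alerts_processor name while the EXP_ALERTS flag decides whether the PostgreSQL or ClickHouse implementation backs it. A short illustrative caller-side sketch follows; the import path is from the diff, the call site is only an example.

# Illustrative caller-side view of the toggle above.
from chalicelib.core.alerts import alerts_processor

# Whichever backend EXP_ALERTS selected, the caller just runs .process():
alerts_processor.process()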
@@ -7,8 +7,8 @@ from decouple import config

 import schemas
 from chalicelib.core import notifications, webhook
-from chalicelib.core.collaboration_msteams import MSTeams
+from chalicelib.core.collaborations.collaboration_msteams import MSTeams
-from chalicelib.core.collaboration_slack import Slack
+from chalicelib.core.collaborations.collaboration_slack import Slack
 from chalicelib.utils import pg_client, helper, email_helper, smtp
 from chalicelib.utils.TimeUTC import TimeUTC

@@ -1,9 +1,10 @@
+from chalicelib.core.alerts.modules import TENANT_ID
 from chalicelib.utils import pg_client, helper


 def get_all_alerts():
     with pg_client.PostgresClient(long_query=True) as cur:
-        query = """SELECT tenant_id,
+        query = f"""SELECT {TENANT_ID} AS tenant_id,
                           alert_id,
                           projects.project_id,
                           projects.name AS project_name,

@@ -1,16 +1,16 @@
-import decimal
 import logging

 from pydantic_core._pydantic_core import ValidationError

 import schemas
-from chalicelib.core import alerts
+from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core import alerts_listener
+from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core import sessions
+from chalicelib.core.sessions import sessions_pg as sessions
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

 logger = logging.getLogger(__name__)

 LeftToDb = {
     schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
         "table": "events.pages INNER JOIN public.sessions USING(session_id)",
@@ -46,35 +46,6 @@ LeftToDb = {
         "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
 }

-# This is the frequency of execution for each threshold
-TimeInterval = {
-    15: 3,
-    30: 5,
-    60: 10,
-    120: 20,
-    240: 30,
-    1440: 60,
-}
-
-
-def can_check(a) -> bool:
-    now = TimeUTC.now()
-
-    repetitionBase = a["options"]["currentPeriod"] \
-        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
-           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
-        else a["options"]["previousPeriod"]
-
-    if TimeInterval.get(repetitionBase) is None:
-        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
-        return False
-
-    return (a["options"]["renotifyInterval"] <= 0 or
-            a["options"].get("lastNotification") is None or
-            a["options"]["lastNotification"] <= 0 or
-            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
-        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
-
-
 def Build(a):
     now = TimeUTC.now()
@@ -161,11 +132,12 @@ def Build(a):


 def process():
+    logger.info("> processing alerts on PG")
     notifications = []
     all_alerts = alerts_listener.get_all_alerts()
     with pg_client.PostgresClient() as cur:
         for alert in all_alerts:
-            if can_check(alert):
+            if alert_helpers.can_check(alert):
                 query, params = Build(alert)
                 try:
                     query = cur.mogrify(query, params)
@@ -181,7 +153,7 @@ def process():
                     result = cur.fetchone()
                     if result["valid"]:
                         logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
-                        notifications.append(generate_notification(alert, result))
+                        notifications.append(alert_helpers.generate_notification(alert, result))
                 except Exception as e:
                     logger.error(
                         f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
@@ -195,42 +167,3 @@ def process():
                                 WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
     if len(notifications) > 0:
         alerts.process_notifications(notifications)
-
-
-def __format_value(x):
-    if x % 1 == 0:
-        x = int(x)
-    else:
-        x = round(x, 2)
-    return f"{x:,}"
-
-
-def generate_notification(alert, result):
-    left = __format_value(result['value'])
-    right = __format_value(alert['query']['right'])
-    return {
-        "alertId": alert["alertId"],
-        "tenantId": alert["tenantId"],
-        "title": alert["name"],
-        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
-        "buttonText": "Check metrics for more details",
-        "buttonUrl": f"/{alert['projectId']}/metrics",
-        "imageUrl": None,
-        "projectId": alert["projectId"],
-        "projectName": alert["projectName"],
-        "options": {"source": "ALERT", "sourceId": alert["alertId"],
-                    "sourceMeta": alert["detectionMethod"],
-                    "message": alert["options"]["message"], "projectId": alert["projectId"],
-                    "data": {"title": alert["name"],
-                             "limitValue": alert["query"]["right"],
-                             "actualValue": float(result["value"]) \
-                                 if isinstance(result["value"], decimal.Decimal) \
-                                 else result["value"],
-                             "operator": alert["query"]["operator"],
-                             "trigger": alert["query"]["left"],
-                             "alertId": alert["alertId"],
-                             "detectionMethod": alert["detectionMethod"],
-                             "currentPeriod": alert["options"]["currentPeriod"],
-                             "previousPeriod": alert["options"]["previousPeriod"],
-                             "createdAt": TimeUTC.now()}},
-    }

@@ -3,11 +3,11 @@ import logging

 from pydantic_core._pydantic_core import ValidationError

 import schemas
-from chalicelib.core import alerts
-from chalicelib.core import alerts_listener, alerts_processor
-from chalicelib.core import sessions_exp as sessions
 from chalicelib.utils import pg_client, ch_client, exp_ch_helper
 from chalicelib.utils.TimeUTC import TimeUTC
+from chalicelib.core.alerts import alerts, alerts_listener
+from chalicelib.core.alerts.modules import alert_helpers
+from chalicelib.core.sessions import sessions_ch as sessions

 logger = logging.getLogger(__name__)

@@ -156,16 +156,17 @@ def Build(a):


 def process():
+    logger.info("> processing alerts on CH")
     notifications = []
     all_alerts = alerts_listener.get_all_alerts()
     with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
         for alert in all_alerts:
             if alert["query"]["left"] != "CUSTOM":
                 continue
-            if alerts_processor.can_check(alert):
+            if alert_helpers.can_check(alert):
                 query, params = Build(alert)
                 try:
-                    query = ch_cur.format(query, params)
+                    query = ch_cur.format(query=query, parameters=params)
                 except Exception as e:
                     logger.error(
                         f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
@@ -174,13 +175,13 @@ def process():
                 logger.debug(alert)
                 logger.debug(query)
                 try:
-                    result = ch_cur.execute(query)
+                    result = ch_cur.execute(query=query)
                     if len(result) > 0:
                         result = result[0]

                     if result["valid"]:
                         logger.info("Valid alert, notifying users")
-                        notifications.append(alerts_processor.generate_notification(alert, result))
+                        notifications.append(alert_helpers.generate_notification(alert, result))
                 except Exception as e:
                     logger.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
                     logger.error(str(e))
api/chalicelib/core/alerts/modules/__init__.py (new file, 3 lines)

@@ -0,0 +1,3 @@
TENANT_ID = "-1"

from . import helpers as alert_helpers
api/chalicelib/core/alerts/modules/helpers.py (new file, 74 lines)

@@ -0,0 +1,74 @@
import decimal
import logging

import schemas
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)
# This is the frequency of execution for each threshold
TimeInterval = {
    15: 3,
    30: 5,
    60: 10,
    120: 20,
    240: 30,
    1440: 60,
}


def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"


def can_check(a) -> bool:
    now = TimeUTC.now()

    repetitionBase = a["options"]["currentPeriod"] \
        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
        else a["options"]["previousPeriod"]

    if TimeInterval.get(repetitionBase) is None:
        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
        return False

    return (a["options"]["renotifyInterval"] <= 0 or
            a["options"].get("lastNotification") is None or
            a["options"]["lastNotification"] <= 0 or
            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


def generate_notification(alert, result):
    left = __format_value(result['value'])
    right = __format_value(alert['query']['right'])
    return {
        "alertId": alert["alertId"],
        "tenantId": alert["tenantId"],
        "title": alert["name"],
        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
        "buttonText": "Check metrics for more details",
        "buttonUrl": f"/{alert['projectId']}/metrics",
        "imageUrl": None,
        "projectId": alert["projectId"],
        "projectName": alert["projectName"],
        "options": {"source": "ALERT", "sourceId": alert["alertId"],
                    "sourceMeta": alert["detectionMethod"],
                    "message": alert["options"]["message"], "projectId": alert["projectId"],
                    "data": {"title": alert["name"],
                             "limitValue": alert["query"]["right"],
                             "actualValue": float(result["value"]) \
                                 if isinstance(result["value"], decimal.Decimal) \
                                 else result["value"],
                             "operator": alert["query"]["operator"],
                             "trigger": alert["query"]["left"],
                             "alertId": alert["alertId"],
                             "detectionMethod": alert["detectionMethod"],
                             "currentPeriod": alert["options"]["currentPeriod"],
                             "previousPeriod": alert["options"]["previousPeriod"],
                             "createdAt": TimeUTC.now()}},
    }
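A hedged usage sketch of these helpers follows; the field names in the sample alert mirror the keys the module reads, but the values and the call site are purely illustrative and assume the repo's schemas and TimeUTC modules are importable.

# Illustrative only: exercises can_check/generate_notification with a sample
# alert dict whose keys mirror those read by helpers.py above.
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.utils.TimeUTC import TimeUTC

alert = {
    "alertId": 1, "tenantId": -1, "projectId": 1, "projectName": "demo",
    "name": "High error count", "seriesName": "errors.count",
    "detectionMethod": "threshold",
    "createdAt": TimeUTC.now(),
    "query": {"left": "CUSTOM", "operator": ">=", "right": 10},
    "options": {"currentPeriod": 15, "previousPeriod": 15,
                "renotifyInterval": 0, "lastNotification": None, "message": []},
}
if alert_helpers.can_check(alert):
    print(alert_helpers.generate_notification(alert, {"value": 12, "valid": True}))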
@@ -1,32 +0,0 @@
-from chalicelib.utils import pg_client, helper
-
-
-def get_all_alerts():
-    with pg_client.PostgresClient(long_query=True) as cur:
-        query = """SELECT -1 AS tenant_id,
-                          alert_id,
-                          projects.project_id,
-                          projects.name AS project_name,
-                          detection_method,
-                          query,
-                          options,
-                          (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
-                          alerts.name,
-                          alerts.series_id,
-                          filter,
-                          change,
-                          COALESCE(metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count',
-                                   query ->> 'left') AS series_name
-                   FROM public.alerts
-                        INNER JOIN projects USING (project_id)
-                        LEFT JOIN metric_series USING (series_id)
-                        LEFT JOIN metrics USING (metric_id)
-                   WHERE alerts.deleted_at ISNULL
-                     AND alerts.active
-                     AND projects.active
-                     AND projects.deleted_at ISNULL
-                     AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
-                   ORDER BY alerts.created_at;"""
-        cur.execute(query=query)
-        all_alerts = helper.list_to_camel_case(cur.fetchall())
-        return all_alerts

@@ -1,3 +1,4 @@
+import logging
 from os import access, R_OK
 from os.path import exists as path_exists, getsize

@@ -10,6 +11,8 @@ import schemas
 from chalicelib.core import projects
 from chalicelib.utils.TimeUTC import TimeUTC

+logger = logging.getLogger(__name__)
+
 ASSIST_KEY = config("ASSIST_KEY")
 ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY

@@ -52,21 +55,21 @@ def __get_live_sessions_ws(project_id, data):
         results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
                                 json=data, timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
-            print(results.text)
+            logger.error(results.text)
             return {"total": 0, "sessions": []}
         live_peers = results.json().get("data", [])
     except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
         live_peers = {"total": 0, "sessions": []}
     except Exception as e:
-        print("!! Issue getting Live-Assist response")
+        logger.error("!! Issue getting Live-Assist response")
-        print(str(e))
+        logger.exception(e)
-        print("expected JSON, received:")
+        logger.error("expected JSON, received:")
         try:
-            print(results.text)
+            logger.error(results.text)
         except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         live_peers = {"total": 0, "sessions": []}
     _live_peers = live_peers
     if "sessions" in live_peers:
@@ -102,8 +105,8 @@ def get_live_session_by_id(project_id, session_id):
         results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
-            print(results.text)
+            logger.error(results.text)
             return None
         results = results.json().get("data")
         if results is None:
@@ -111,16 +114,16 @@ def get_live_session_by_id(project_id, session_id):
         results["live"] = True
         results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
     except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
         return None
     except Exception as e:
-        print("!! Issue getting Assist response")
+        logger.error("!! Issue getting Assist response")
-        print(str(e))
+        logger.exception(e)
-        print("expected JSON, received:")
+        logger.error("expected JSON, received:")
         try:
-            print(results.text)
+            logger.error(results.text)
         except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         return None
     return results

@@ -132,21 +135,21 @@ def is_live(project_id, session_id, project_key=None):
         results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for is_live")
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
-            print(results.text)
+            logger.error(results.text)
             return False
         results = results.json().get("data")
     except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
         return False
     except Exception as e:
-        print("!! Issue getting Assist response")
+        logger.error("!! Issue getting Assist response")
-        print(str(e))
+        logger.exception(e)
-        print("expected JSON, received:")
+        logger.error("expected JSON, received:")
         try:
-            print(results.text)
+            logger.error(results.text)
         except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         return False
     return str(session_id) == results

@@ -161,21 +164,21 @@ def autocomplete(project_id, q: str, key: str = None):
             ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
             params=params, timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
-            print(results.text)
+            logger.error(results.text)
             return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
         results = results.json().get("data", [])
     except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
         return {"errors": ["Assist request timeout"]}
     except Exception as e:
-        print("!! Issue getting Assist response")
+        logger.error("!! Issue getting Assist response")
-        print(str(e))
+        logger.exception(e)
-        print("expected JSON, received:")
+        logger.error("expected JSON, received:")
         try:
-            print(results.text)
+            logger.error(results.text)
         except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         return {"errors": ["Something went wrong wile calling assist"]}
     for r in results:
         r["type"] = __change_keys(r["type"])
@@ -239,24 +242,24 @@ def session_exists(project_id, session_id):
         results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            print(f"!! issue with the peer-server code:{results.status_code} for session_exists")
+            logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
-            print(results.text)
+            logger.error(results.text)
             return None
         results = results.json().get("data")
         if results is None:
             return False
         return True
     except requests.exceptions.Timeout:
-        print("!! Timeout getting Assist response")
+        logger.error("!! Timeout getting Assist response")
         return False
     except Exception as e:
-        print("!! Issue getting Assist response")
+        logger.error("!! Issue getting Assist response")
-        print(str(e))
+        logger.exception(e)
-        print("expected JSON, received:")
+        logger.error("expected JSON, received:")
         try:
-            print(results.text)
+            logger.error(results.text)
         except:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         return False
@@ -37,8 +37,7 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
         logger.debug("! JWT Expired signature")
         return None
     except BaseException as e:
-        logger.warning("! JWT Base Exception")
+        logger.warning("! JWT Base Exception", exc_info=e)
-        logger.debug(e)
         return None
     return payload

@@ -56,8 +55,7 @@ def jwt_refresh_authorizer(scheme: str, token: str):
         logger.debug("! JWT-refresh Expired signature")
         return None
     except BaseException as e:
-        logger.warning("! JWT-refresh Base Exception")
+        logger.error("! JWT-refresh Base Exception", exc_info=e)
-        logger.debug(e)
         return None
     return payload
@@ -61,11 +61,11 @@ def __get_autocomplete_table(value, project_id):
     try:
         cur.execute(query)
     except Exception as err:
-        print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
+        logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
-        print(query.decode('UTF-8'))
+        logger.exception(query.decode('UTF-8'))
-        print("--------- VALUE -----------")
+        logger.exception("--------- VALUE -----------")
-        print(value)
+        logger.exception(value)
-        print("--------------------")
+        logger.exception("--------------------")
         raise err
     results = cur.fetchall()
     for r in results:
@@ -85,7 +85,8 @@ def __generic_query(typename, value_length=None):
                 ORDER BY value"""

     if value_length is None or value_length > 2:
-        return f"""(SELECT DISTINCT value, type
+        return f"""SELECT DISTINCT ON(value,type) value, type
+                   ((SELECT DISTINCT value, type
                     FROM {TABLE}
                     WHERE
                       project_id = %(project_id)s
@@ -101,7 +102,7 @@ def __generic_query(typename, value_length=None):
                       AND type='{typename.upper()}'
                       AND value ILIKE %(value)s
                     ORDER BY value
-                    LIMIT 5);"""
+                    LIMIT 5)) AS raw;"""
     return f"""SELECT DISTINCT value, type
                FROM {TABLE}
                WHERE
@@ -124,7 +125,7 @@ def __generic_autocomplete(event: Event):
     return f


-def __generic_autocomplete_metas(typename):
+def generic_autocomplete_metas(typename):
     def f(project_id, text):
         with pg_client.PostgresClient() as cur:
             params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
@@ -326,7 +327,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                    AND {colname} ILIKE %(svalue)s LIMIT 5)""")
     with pg_client.PostgresClient() as cur:
         cur.execute(cur.mogrify(f"""\
-                    SELECT key, value, 'METADATA' AS TYPE
+                    SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
                     FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                     LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                                   "svalue": helper.string_to_sql_like("^" + value)}))
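Both query tweaks above lean on PostgreSQL's DISTINCT ON to deduplicate the unioned autocomplete rows. The hedged sketch below shows the construct on a made-up table, not on anything from the repo.

# Illustrative only: shows what DISTINCT ON (value, type) does, using a made-up
# table name; the real queries are the f-strings in the hunk above.
import psycopg

SQL = """SELECT DISTINCT ON (value, type) value, type
         FROM demo_autocomplete            -- hypothetical table
         ORDER BY value, type;"""

with psycopg.connect("dbname=demo") as conn:   # placeholder DSN
    rows = conn.execute(SQL).fetchall()        # one row per (value, type) pair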
|
|
@ -1,7 +1,8 @@
|
||||||
from chalicelib.utils import pg_client
|
from chalicelib.core import projects
|
||||||
from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
|
|
||||||
|
|
||||||
from chalicelib.core import users
|
from chalicelib.core import users
|
||||||
|
from chalicelib.core.log_tools import datadog, stackdriver, sentry
|
||||||
|
from chalicelib.core.modules import TENANT_CONDITION
|
||||||
|
from chalicelib.utils import pg_client
|
||||||
|
|
||||||
|
|
||||||
def get_state(tenant_id):
|
def get_state(tenant_id):
|
||||||
|
|
@ -12,47 +13,61 @@ def get_state(tenant_id):
|
||||||
|
|
||||||
if len(pids) > 0:
|
if len(pids) > 0:
|
||||||
cur.execute(
|
cur.execute(
|
||||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
cur.mogrify(
|
||||||
|
"""SELECT EXISTS(( SELECT 1
|
||||||
FROM public.sessions AS s
|
FROM public.sessions AS s
|
||||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||||
{"ids": tuple(pids)})
|
{"ids": tuple(pids)},
|
||||||
|
)
|
||||||
)
|
)
|
||||||
recorded = cur.fetchone()["exists"]
|
recorded = cur.fetchone()["exists"]
|
||||||
meta = False
|
meta = False
|
||||||
if recorded:
|
if recorded:
|
||||||
cur.execute("""SELECT EXISTS((SELECT 1
|
query = cur.mogrify(
|
||||||
|
f"""SELECT EXISTS((SELECT 1
|
||||||
FROM public.projects AS p
|
FROM public.projects AS p
|
||||||
LEFT JOIN LATERAL ( SELECT 1
|
LEFT JOIN LATERAL ( SELECT 1
|
||||||
FROM public.sessions
|
FROM public.sessions
|
||||||
WHERE sessions.project_id = p.project_id
|
WHERE sessions.project_id = p.project_id
|
||||||
AND sessions.user_id IS NOT NULL
|
AND sessions.user_id IS NOT NULL
|
||||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||||
WHERE p.deleted_at ISNULL
|
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
|
||||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||||
OR p.metadata_10 IS NOT NULL )
|
OR p.metadata_10 IS NOT NULL )
|
||||||
)) AS exists;""")
|
)) AS exists;""",
|
||||||
|
{"tenant_id": tenant_id},
|
||||||
|
)
|
||||||
|
cur.execute(query)
|
||||||
|
|
||||||
meta = cur.fetchone()["exists"]
|
meta = cur.fetchone()["exists"]
|
||||||
|
|
||||||
return [
|
return [
|
||||||
{"task": "Install OpenReplay",
|
{
|
||||||
"done": recorded,
|
"task": "Install OpenReplay",
|
||||||
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
|
"done": recorded,
|
||||||
{"task": "Identify Users",
|
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
||||||
"done": meta,
|
},
|
||||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
|
{
|
||||||
{"task": "Invite Team Members",
|
"task": "Identify Users",
|
||||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
"done": meta,
|
||||||
"URL": "https://app.openreplay.com/client/manage-users"},
|
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
||||||
{"task": "Integrations",
|
},
|
||||||
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
|
{
|
||||||
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
|
"task": "Invite Team Members",
|
||||||
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||||
"URL": "https://docs.openreplay.com/integrations"}
|
"URL": "https://app.openreplay.com/client/manage-users",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"task": "Integrations",
|
||||||
|
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
||||||
|
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
||||||
|
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||||
|
"URL": "https://docs.openreplay.com/integrations",
|
||||||
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
@@ -63,52 +78,66 @@ def get_state_installing(tenant_id):

     if len(pids) > 0:
         cur.execute(
-            cur.mogrify("""SELECT EXISTS(( SELECT 1
+            cur.mogrify(
+                """SELECT EXISTS(( SELECT 1
                                FROM public.sessions AS s
                                WHERE s.project_id IN %(ids)s)) AS exists;""",
-                        {"ids": tuple(pids)})
+                {"ids": tuple(pids)},
+            )
         )
         recorded = cur.fetchone()["exists"]

-        return {"task": "Install OpenReplay",
-                "done": recorded,
-                "URL": "https://docs.openreplay.com/getting-started/quick-start"}
+        return {
+            "task": "Install OpenReplay",
+            "done": recorded,
+            "URL": "https://docs.openreplay.com/getting-started/quick-start",
+        }


 def get_state_identify_users(tenant_id):
     with pg_client.PostgresClient() as cur:
-        cur.execute("""SELECT EXISTS((SELECT 1
+        query = cur.mogrify(
+            f"""SELECT EXISTS((SELECT 1
                        FROM public.projects AS p
                             LEFT JOIN LATERAL ( SELECT 1
                                                 FROM public.sessions
                                                 WHERE sessions.project_id = p.project_id
                                                   AND sessions.user_id IS NOT NULL
                                                 LIMIT 1) AS sessions(user_id) ON (TRUE)
-                       WHERE p.deleted_at ISNULL
+                       WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
                          AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                               OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                               OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                               OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                               OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                               OR p.metadata_10 IS NOT NULL )
-                       )) AS exists;""")
+                       )) AS exists;""",
+            {"tenant_id": tenant_id},
+        )
+        cur.execute(query)

         meta = cur.fetchone()["exists"]

-    return {"task": "Identify Users",
-            "done": meta,
-            "URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
+    return {
+        "task": "Identify Users",
+        "done": meta,
+        "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
+    }


 def get_state_manage_users(tenant_id):
-    return {"task": "Invite Team Members",
-            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-            "URL": "https://app.openreplay.com/client/manage-users"}
+    return {
+        "task": "Invite Team Members",
+        "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+        "URL": "https://app.openreplay.com/client/manage-users",
+    }


 def get_state_integrations(tenant_id):
-    return {"task": "Integrations",
-            "done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
-                    or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
-                    or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
-            "URL": "https://docs.openreplay.com/integrations"}
+    return {
+        "task": "Integrations",
+        "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
+                or len(sentry.get_all(tenant_id=tenant_id)) > 0
+                or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+        "URL": "https://docs.openreplay.com/integrations",
+    }
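The updated helpers build the SQL with cur.mogrify(sql, params) and then execute the rendered query instead of interpolating values into the string. A minimal sketch of that pattern with plain psycopg2 (the DSN and the direct psycopg2.connect call are placeholders for illustration; OpenReplay wraps the connection in pg_client.PostgresClient()):

import psycopg2
import psycopg2.extras

# Placeholder connection string, assumed for this sketch only.
conn = psycopg2.connect("dbname=test user=postgres")
with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
    query = cur.mogrify(
        """SELECT EXISTS((SELECT 1
                          FROM public.sessions AS s
                          WHERE s.project_id IN %(ids)s)) AS exists;""",
        {"ids": tuple([1, 2, 3])},  # a tuple is rendered as a SQL list for IN
    )
    cur.execute(query)          # query is already a fully bound byte string
    recorded = cur.fetchone()["exists"]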
1  api/chalicelib/core/collaborations/__init__.py  Normal file
@@ -0,0 +1 @@
+from . import collaboration_base as _

@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaboration_base import BaseCollaboration
+from chalicelib.core.collaborations.collaboration_base import BaseCollaboration

 logger = logging.getLogger(__name__)

@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaboration_base import BaseCollaboration
+from chalicelib.core.collaborations.collaboration_base import BaseCollaboration


 class Slack(BaseCollaboration):

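The new package __init__.py imports its submodule when the package itself is imported. A short illustrative sketch of what that means for callers (import paths are taken from the diff; the alias name is made up for this example and the snippet only runs inside the repo):

# chalicelib/core/collaborations/__init__.py executes `from . import collaboration_base as _`,
# so loading the package also loads and binds the submodule as an attribute:
import chalicelib.core.collaborations as collaborations  # collaborations.collaboration_base is now set

# The base class used by the Slack integration resolves through the new package path:
from chalicelib.core.collaborations.collaboration_base import BaseCollaboration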
@@ -1,653 +0,0 @@
import json
import logging

from fastapi import HTTPException, status

import schemas
from chalicelib.core import sessions, funnels, errors, issues, heatmaps, product_analytics, \
    custom_metrics_predefined
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC

logger = logging.getLogger(__name__)


# TODO: refactor this to split
# timeseries /
# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
# remove "table of" calls from this function
def __try_live(project_id, data: schemas.CardSchema):
    results = []
    for i, s in enumerate(data.series):
        results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
                                               view_type=data.view_type, metric_type=data.metric_type,
                                               metric_of=data.metric_of, metric_value=data.metric_value))

    return results


def __get_table_of_series(project_id, data: schemas.CardSchema):
    results = []
    for i, s in enumerate(data.series):
        results.append(sessions.search2_table(data=s.filter, project_id=project_id, density=data.density,
                                              metric_of=data.metric_of, metric_value=data.metric_value,
                                              metric_format=data.metric_format))

    return results


def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel, user_id: int = None):
|
|
||||||
if len(data.series) == 0:
|
|
||||||
return {
|
|
||||||
"stages": [],
|
|
||||||
"totalDropDueToIssues": 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
|
|
||||||
# data=data.series[0].filter,
|
|
||||||
# metric_format=data.metric_format)
|
|
||||||
return funnels.get_simple_funnel(project=project,
|
|
||||||
data=data.series[0].filter,
|
|
||||||
metric_format=data.metric_format)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
|
||||||
if len(data.series) == 0:
|
|
||||||
return {
|
|
||||||
"total": 0,
|
|
||||||
"errors": []
|
|
||||||
}
|
|
||||||
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
|
||||||
if len(data.series) == 0:
|
|
||||||
logger.debug("empty series")
|
|
||||||
return {
|
|
||||||
"total": 0,
|
|
||||||
"sessions": []
|
|
||||||
}
|
|
||||||
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
|
||||||
include_mobs: bool = True):
|
|
||||||
if len(data.series) == 0:
|
|
||||||
return None
|
|
||||||
data.series[0].filter.filters += data.series[0].filter.events
|
|
||||||
data.series[0].filter.events = []
|
|
||||||
return heatmaps.search_short_session(project_id=project.project_id, user_id=user_id,
|
|
||||||
data=schemas.HeatMapSessionsSearch(
|
|
||||||
**data.series[0].filter.model_dump()),
|
|
||||||
include_mobs=include_mobs)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
|
|
||||||
if len(data.series) == 0:
|
|
||||||
data.series.append(
|
|
||||||
schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
|
|
||||||
elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
|
|
||||||
data.series[0].filter = schemas.PathAnalysisSchema()
|
|
||||||
|
|
||||||
return product_analytics.path_analysis(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
|
|
||||||
series_charts = __try_live(project_id=project.project_id, data=data)
|
|
||||||
results = [{}] * len(series_charts[0])
|
|
||||||
for i in range(len(results)):
|
|
||||||
for j, series_chart in enumerate(series_charts):
|
|
||||||
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
|
|
||||||
data.series[j].name if data.series[j].name else j + 1: series_chart[i]["count"]}
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
def not_supported(**args):
|
|
||||||
raise Exception("not supported")
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_user_ids(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_sessions(project: schemas.ProjectContext, data: schemas.CardTable, user_id):
|
|
||||||
return __get_sessions_list(project=project, user_id=user_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_errors(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
|
|
||||||
return __get_errors_list(project=project, user_id=user_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_issues(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_browsers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_devises(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_countries(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_urls(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_referrers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_table_of_requests(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
|
||||||
return __get_table_of_series(project_id=project.project_id, data=data)


def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
    supported = {
        schemas.MetricOfTable.SESSIONS: __get_table_of_sessions,
        schemas.MetricOfTable.ERRORS: __get_table_of_errors,
        schemas.MetricOfTable.USER_ID: __get_table_of_user_ids,
        schemas.MetricOfTable.ISSUES: __get_table_of_issues,
        schemas.MetricOfTable.USER_BROWSER: __get_table_of_browsers,
        schemas.MetricOfTable.USER_DEVICE: __get_table_of_devises,
        schemas.MetricOfTable.USER_COUNTRY: __get_table_of_countries,
        schemas.MetricOfTable.VISITED_URL: __get_table_of_urls,
        schemas.MetricOfTable.REFERRER: __get_table_of_referrers,
        schemas.MetricOfTable.FETCH: __get_table_of_requests
    }
    return supported.get(data.metric_of, not_supported)(project=project, data=data, user_id=user_id)


def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
    if data.is_predefined:
        return custom_metrics_predefined.get_metric(key=data.metric_of,
                                                    project_id=project.project_id,
                                                    data=data.model_dump())

    supported = {
        schemas.MetricType.TIMESERIES: __get_timeseries_chart,
        schemas.MetricType.TABLE: __get_table_chart,
        schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
        schemas.MetricType.FUNNEL: __get_funnel_chart,
        schemas.MetricType.INSIGHTS: not_supported,
        schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
    }
    return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)


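Both `supported` dictionaries above dispatch on the card's metric type or metric-of value and fall back to `not_supported`, which raises, when the key is missing. A minimal standalone sketch of the same dispatch-with-fallback idea (the enum values and handler names below are illustrative, not from the codebase):

from enum import Enum


class MetricType(str, Enum):  # stand-in for schemas.MetricType
    TIMESERIES = "timeseries"
    TABLE = "table"


def not_supported(**kwargs):
    raise Exception("not supported")


def timeseries_chart(**kwargs):
    return {"chart": "timeseries", **kwargs}


SUPPORTED = {MetricType.TIMESERIES: timeseries_chart}

# Known key: its handler runs; unknown key: the fallback is returned and raises only when called.
print(SUPPORTED.get(MetricType.TIMESERIES, not_supported)(project_id=1))
handler = SUPPORTED.get(MetricType.TABLE, not_supported)
print(handler)  # <function not_supported ...>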
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
|
||||||
# No need for this because UI is sending the full payload
|
|
||||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
|
||||||
# if card is None:
|
|
||||||
# return None
|
|
||||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
|
||||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
|
||||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
|
||||||
return None
|
|
||||||
results = []
|
|
||||||
for s in data.series:
|
|
||||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
|
||||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
|
||||||
results = []
|
|
||||||
if len(data.series) == 0:
|
|
||||||
return results
|
|
||||||
for s in data.series:
|
|
||||||
if len(data.filters) > 0:
|
|
||||||
s.filter.filters += data.filters
|
|
||||||
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
|
||||||
|
|
||||||
results.append({"seriesId": None, "seriesName": s.name,
|
|
||||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
|
||||||
if data.is_predefined:
|
|
||||||
return not_supported()
|
|
||||||
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
|
||||||
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
|
||||||
supported = {
|
|
||||||
schemas.MetricType.TIMESERIES: not_supported,
|
|
||||||
schemas.MetricType.TABLE: not_supported,
|
|
||||||
schemas.MetricType.HEAT_MAP: not_supported,
|
|
||||||
schemas.MetricType.INSIGHTS: not_supported,
|
|
||||||
schemas.MetricType.PATH_ANALYSIS: not_supported,
|
|
||||||
}
|
|
||||||
return supported.get(data.metric_type, not_supported)()
|
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
|
||||||
r = {"start_point": [s.model_dump() for s in data.start_point],
|
|
||||||
"start_type": data.start_type,
|
|
||||||
"excludes": [e.model_dump() for e in data.excludes],
|
|
||||||
"hideExcess": data.hide_excess}
|
|
||||||
return r
|
|
||||||
|
|
||||||
|
|
||||||
def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
session_data = None
|
|
||||||
if data.metric_type == schemas.MetricType.HEAT_MAP:
|
|
||||||
if data.session_id is not None:
|
|
||||||
session_data = {"sessionId": data.session_id}
|
|
||||||
else:
|
|
||||||
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
|
||||||
data=data, include_mobs=False)
|
|
||||||
if session_data is not None:
|
|
||||||
session_data = {"sessionId": session_data["sessionId"]}
|
|
||||||
|
|
||||||
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
|
||||||
for i, s in enumerate(data.series):
|
|
||||||
for k in s.model_dump().keys():
|
|
||||||
_data[f"{k}_{i}"] = s.__getattribute__(k)
|
|
||||||
_data[f"index_{i}"] = i
|
|
||||||
_data[f"filter_{i}"] = s.filter.json()
|
|
||||||
series_len = len(data.series)
|
|
||||||
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
|
||||||
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
|
||||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
|
||||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
|
||||||
|
|
||||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
|
||||||
view_type, metric_type, metric_of, metric_value,
|
|
||||||
metric_format, default_config, thumbnail, data,
|
|
||||||
card_info)
|
|
||||||
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
|
|
||||||
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
|
|
||||||
%(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s,
|
|
||||||
%(card_info)s)
|
|
||||||
RETURNING metric_id"""
|
|
||||||
if len(data.series) > 0:
|
|
||||||
query = f"""WITH m AS ({query})
|
|
||||||
INSERT INTO metric_series(metric_id, index, name, filter)
|
|
||||||
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
|
|
||||||
for i in range(series_len)])}
|
|
||||||
RETURNING metric_id;"""
|
|
||||||
|
|
||||||
query = cur.mogrify(query, params)
|
|
||||||
cur.execute(query)
|
|
||||||
r = cur.fetchone()
|
|
||||||
if dashboard:
|
|
||||||
return r["metric_id"]
|
|
||||||
return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}
|
|
||||||
|
|
||||||
|
|
||||||
def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
|
||||||
metric: dict = get_card(metric_id=metric_id, project_id=project_id,
|
|
||||||
user_id=user_id, flatten=False, include_data=True)
|
|
||||||
if metric is None:
|
|
||||||
return None
|
|
||||||
series_ids = [r["seriesId"] for r in metric["series"]]
|
|
||||||
n_series = []
|
|
||||||
d_series_ids = []
|
|
||||||
u_series = []
|
|
||||||
u_series_ids = []
|
|
||||||
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
|
|
||||||
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
|
|
||||||
"metric_type": data.metric_type, "metric_of": data.metric_of,
|
|
||||||
"metric_value": data.metric_value, "metric_format": data.metric_format,
|
|
||||||
"config": json.dumps(data.default_config.model_dump()), "thumbnail": data.thumbnail}
|
|
||||||
for i, s in enumerate(data.series):
|
|
||||||
prefix = "u_"
|
|
||||||
if s.index is None:
|
|
||||||
s.index = i
|
|
||||||
if s.series_id is None or s.series_id not in series_ids:
|
|
||||||
n_series.append({"i": i, "s": s})
|
|
||||||
prefix = "n_"
|
|
||||||
else:
|
|
||||||
u_series.append({"i": i, "s": s})
|
|
||||||
u_series_ids.append(s.series_id)
|
|
||||||
ns = s.model_dump()
|
|
||||||
for k in ns.keys():
|
|
||||||
if k == "filter":
|
|
||||||
ns[k] = json.dumps(ns[k])
|
|
||||||
params[f"{prefix}{k}_{i}"] = ns[k]
|
|
||||||
for i in series_ids:
|
|
||||||
if i not in u_series_ids:
|
|
||||||
d_series_ids.append(i)
|
|
||||||
params["d_series_ids"] = tuple(d_series_ids)
|
|
||||||
params["card_info"] = None
|
|
||||||
params["session_data"] = json.dumps(metric["data"])
|
|
||||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
|
||||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
|
||||||
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
|
||||||
if data.session_id is not None:
|
|
||||||
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
|
||||||
elif metric.get("data") and metric["data"].get("sessionId"):
|
|
||||||
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
sub_queries = []
|
|
||||||
if len(n_series) > 0:
|
|
||||||
sub_queries.append(f"""\
|
|
||||||
n AS (INSERT INTO metric_series (metric_id, index, name, filter)
|
|
||||||
VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
|
|
||||||
for s in n_series])}
|
|
||||||
RETURNING 1)""")
|
|
||||||
if len(u_series) > 0:
|
|
||||||
sub_queries.append(f"""\
|
|
||||||
u AS (UPDATE metric_series
|
|
||||||
SET name=series.name,
|
|
||||||
filter=series.filter,
|
|
||||||
index=series.index
|
|
||||||
FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
|
|
||||||
for s in u_series])}) AS series(series_id, index, name, filter)
|
|
||||||
WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
|
|
||||||
RETURNING 1)""")
|
|
||||||
if len(d_series_ids) > 0:
|
|
||||||
sub_queries.append("""\
|
|
||||||
d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
|
|
||||||
RETURNING 1)""")
|
|
||||||
query = cur.mogrify(f"""\
|
|
||||||
{"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
|
|
||||||
UPDATE metrics
|
|
||||||
SET name = %(name)s, is_public= %(is_public)s,
|
|
||||||
view_type= %(view_type)s, metric_type= %(metric_type)s,
|
|
||||||
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
|
|
||||||
metric_format= %(metric_format)s,
|
|
||||||
edited_at = timezone('utc'::text, now()),
|
|
||||||
default_config = %(config)s,
|
|
||||||
thumbnail = %(thumbnail)s,
|
|
||||||
card_info = %(card_info)s,
|
|
||||||
data = %(session_data)s
|
|
||||||
WHERE metric_id = %(metric_id)s
|
|
||||||
AND project_id = %(project_id)s
|
|
||||||
AND (user_id = %(user_id)s OR is_public)
|
|
||||||
RETURNING metric_id;""", params)
|
|
||||||
cur.execute(query)
|
|
||||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
|
||||||
|
|
||||||
|
|
||||||
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
|
||||||
constraints = ["metrics.project_id = %(project_id)s",
|
|
||||||
"metrics.deleted_at ISNULL"]
|
|
||||||
params = {"project_id": project_id, "user_id": user_id,
|
|
||||||
"offset": (data.page - 1) * data.limit,
|
|
||||||
"limit": data.limit, }
|
|
||||||
if data.mine_only:
|
|
||||||
constraints.append("user_id = %(user_id)s")
|
|
||||||
else:
|
|
||||||
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
|
||||||
if data.shared_only:
|
|
||||||
constraints.append("is_public")
|
|
||||||
|
|
||||||
if data.query is not None and len(data.query) > 0:
|
|
||||||
constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
|
|
||||||
params["query"] = helper.values_for_operator(value=data.query,
|
|
||||||
op=schemas.SearchEventOperator.CONTAINS)
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
sub_join = ""
|
|
||||||
if include_series:
|
|
||||||
sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
|
||||||
FROM metric_series
|
|
||||||
WHERE metric_series.metric_id = metrics.metric_id
|
|
||||||
AND metric_series.deleted_at ISNULL
|
|
||||||
) AS metric_series ON (TRUE)"""
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
|
||||||
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
|
||||||
dashboards, owner_email, owner_name, default_config AS config, thumbnail
|
|
||||||
FROM metrics
|
|
||||||
{sub_join}
|
|
||||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
|
||||||
FROM (SELECT DISTINCT dashboard_id, name, is_public
|
|
||||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND dashboard_widgets.metric_id = metrics.metric_id
|
|
||||||
AND project_id = %(project_id)s
|
|
||||||
AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
|
|
||||||
) AS connected_dashboards ON (TRUE)
|
|
||||||
LEFT JOIN LATERAL (SELECT email AS owner_email, name AS owner_name
|
|
||||||
FROM users
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND users.user_id = metrics.user_id
|
|
||||||
) AS owner ON (TRUE)
|
|
||||||
WHERE {" AND ".join(constraints)}
|
|
||||||
ORDER BY created_at {data.order.value}
|
|
||||||
LIMIT %(limit)s OFFSET %(offset)s;""", params)
|
|
||||||
logger.debug("---------")
|
|
||||||
logger.debug(query)
|
|
||||||
logger.debug("---------")
|
|
||||||
cur.execute(query)
|
|
||||||
rows = cur.fetchall()
|
|
||||||
if include_series:
|
|
||||||
for r in rows:
|
|
||||||
for s in r["series"]:
|
|
||||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
|
||||||
else:
|
|
||||||
for r in rows:
|
|
||||||
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
|
||||||
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
|
||||||
rows = helper.list_to_camel_case(rows)
|
|
||||||
return rows
|
|
||||||
|
|
||||||
|
|
||||||
def get_all(project_id, user_id):
|
|
||||||
default_search = schemas.SearchCardsSchema()
|
|
||||||
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
|
||||||
result = rows
|
|
||||||
while len(rows) == default_search.limit:
|
|
||||||
default_search.page += 1
|
|
||||||
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
|
||||||
result += rows
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def delete_card(project_id, metric_id, user_id):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
cur.execute(
|
|
||||||
cur.mogrify("""\
|
|
||||||
UPDATE public.metrics
|
|
||||||
SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
|
|
||||||
WHERE project_id = %(project_id)s
|
|
||||||
AND metric_id = %(metric_id)s
|
|
||||||
AND (user_id = %(user_id)s OR is_public)
|
|
||||||
RETURNING data;""",
|
|
||||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
|
||||||
)
|
|
||||||
|
|
||||||
return {"state": "success"}
|
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_attributes(row):
|
|
||||||
card_info = row.pop("cardInfo")
|
|
||||||
row["excludes"] = card_info.get("excludes", [])
|
|
||||||
row["startPoint"] = card_info.get("startPoint", [])
|
|
||||||
row["startType"] = card_info.get("startType", "start")
|
|
||||||
row["hideExcess"] = card_info.get("hideExcess", False)
|
|
||||||
return row
|
|
||||||
|
|
||||||
|
|
||||||
def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data: bool = False):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, deleted_at, edited_at, metric_type,
|
|
||||||
view_type, metric_of, metric_value, metric_format, is_pinned, default_config,
|
|
||||||
default_config AS config,series, dashboards, owner_email, card_info
|
|
||||||
{',data' if include_data else ''}
|
|
||||||
FROM metrics
|
|
||||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
|
||||||
FROM metric_series
|
|
||||||
WHERE metric_series.metric_id = metrics.metric_id
|
|
||||||
AND metric_series.deleted_at ISNULL
|
|
||||||
) AS metric_series ON (TRUE)
|
|
||||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
|
||||||
FROM (SELECT dashboard_id, name, is_public
|
|
||||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND project_id = %(project_id)s
|
|
||||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
|
||||||
AND metric_id = %(metric_id)s) AS connected_dashboards
|
|
||||||
) AS connected_dashboards ON (TRUE)
|
|
||||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
|
||||||
FROM users
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND users.user_id = metrics.user_id
|
|
||||||
) AS owner ON (TRUE)
|
|
||||||
WHERE metrics.project_id = %(project_id)s
|
|
||||||
AND metrics.deleted_at ISNULL
|
|
||||||
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
|
||||||
AND metrics.metric_id = %(metric_id)s
|
|
||||||
ORDER BY created_at;""",
|
|
||||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
|
||||||
)
|
|
||||||
cur.execute(query)
|
|
||||||
row = cur.fetchone()
|
|
||||||
if row is None:
|
|
||||||
return None
|
|
||||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
|
||||||
row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
|
|
||||||
if flatten:
|
|
||||||
for s in row["series"]:
|
|
||||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
|
||||||
row = helper.dict_to_camel_case(row)
|
|
||||||
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
|
|
||||||
row = __get_path_analysis_attributes(row=row)
|
|
||||||
return row
|
|
||||||
|
|
||||||
|
|
||||||
def get_series_for_alert(project_id, user_id):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
cur.execute(
|
|
||||||
cur.mogrify(
|
|
||||||
"""SELECT series_id AS value,
|
|
||||||
metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
|
|
||||||
'count' AS unit,
|
|
||||||
FALSE AS predefined,
|
|
||||||
metric_id,
|
|
||||||
series_id
|
|
||||||
FROM metric_series
|
|
||||||
INNER JOIN metrics USING (metric_id)
|
|
||||||
WHERE metrics.deleted_at ISNULL
|
|
||||||
AND metrics.project_id = %(project_id)s
|
|
||||||
AND metrics.metric_type = 'timeseries'
|
|
||||||
AND (user_id = %(user_id)s OR is_public)
|
|
||||||
ORDER BY name;""",
|
|
||||||
{"project_id": project_id, "user_id": user_id}
|
|
||||||
)
|
|
||||||
)
|
|
||||||
rows = cur.fetchall()
|
|
||||||
return helper.list_to_camel_case(rows)
|
|
||||||
|
|
||||||
|
|
||||||
def change_state(project_id, metric_id, user_id, status):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
cur.execute(
|
|
||||||
cur.mogrify("""\
|
|
||||||
UPDATE public.metrics
|
|
||||||
SET active = %(status)s
|
|
||||||
WHERE metric_id = %(metric_id)s
|
|
||||||
AND (user_id = %(user_id)s OR is_public);""",
|
|
||||||
{"metric_id": metric_id, "status": status, "user_id": user_id})
|
|
||||||
)
|
|
||||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
|
||||||
|
|
||||||
|
|
||||||
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
|
||||||
data: schemas.CardSessionsSchema
|
|
||||||
# , range_value=None, start_date=None, end_date=None
|
|
||||||
):
|
|
||||||
# No need for this because UI is sending the full payload
|
|
||||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
|
||||||
# if card is None:
|
|
||||||
# return None
|
|
||||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
|
||||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
|
||||||
# if metric is None:
|
|
||||||
# return None
|
|
||||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
|
||||||
return None
|
|
||||||
for s in data.series:
|
|
||||||
s.filter.startTimestamp = data.startTimestamp
|
|
||||||
s.filter.endTimestamp = data.endTimestamp
|
|
||||||
s.filter.limit = data.limit
|
|
||||||
s.filter.page = data.page
|
|
||||||
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
|
|
||||||
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
|
|
||||||
issue = None
|
|
||||||
for i in issues_list:
|
|
||||||
if i.get("issueId", "") == issue_id:
|
|
||||||
issue = i
|
|
||||||
break
|
|
||||||
if issue is None:
|
|
||||||
issue = issues.get(project_id=project_id, issue_id=issue_id)
|
|
||||||
if issue is not None:
|
|
||||||
issue = {**issue,
|
|
||||||
"affectedSessions": 0,
|
|
||||||
"affectedUsers": 0,
|
|
||||||
"conversionImpact": 0,
|
|
||||||
"lostConversions": 0,
|
|
||||||
"unaffectedSessions": 0}
|
|
||||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
|
||||||
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
|
|
||||||
issue=issue, data=s.filter)
|
|
||||||
if issue is not None else {"total": 0, "sessions": []},
|
|
||||||
"issue": issue}
|
|
||||||
|
|
||||||
|
|
||||||
def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
|
|
||||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project.project_id, user_id=user_id, include_data=True)
|
|
||||||
|
|
||||||
if raw_metric is None:
|
|
||||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
|
|
||||||
raw_metric["startTimestamp"] = data.startTimestamp
|
|
||||||
raw_metric["endTimestamp"] = data.endTimestamp
|
|
||||||
raw_metric["limit"] = data.limit
|
|
||||||
raw_metric["density"] = data.density
|
|
||||||
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
|
||||||
|
|
||||||
if metric.is_predefined:
|
|
||||||
return custom_metrics_predefined.get_metric(key=metric.metric_of,
|
|
||||||
project_id=project.project_id,
|
|
||||||
data=data.model_dump())
|
|
||||||
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
|
|
||||||
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
|
||||||
return heatmaps.get_selected_session(project_id=project.project_id,
|
|
||||||
session_id=raw_metric["data"]["sessionId"])
|
|
||||||
else:
|
|
||||||
return heatmaps.search_short_session(project_id=project.project_id,
|
|
||||||
data=schemas.HeatMapSessionsSearch(**metric.model_dump()),
|
|
||||||
user_id=user_id)
|
|
||||||
|
|
||||||
return get_chart(project=project, data=metric, user_id=user_id)
|
|
||||||
|
|
||||||
|
|
||||||
def card_exists(metric_id, project_id, user_id) -> bool:
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT 1
|
|
||||||
FROM metrics
|
|
||||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
|
||||||
FROM (SELECT dashboard_id, name, is_public
|
|
||||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND project_id = %(project_id)s
|
|
||||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
|
||||||
AND metric_id = %(metric_id)s) AS connected_dashboards
|
|
||||||
) AS connected_dashboards ON (TRUE)
|
|
||||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
|
||||||
FROM users
|
|
||||||
WHERE deleted_at ISNULL
|
|
||||||
AND users.user_id = metrics.user_id
|
|
||||||
) AS owner ON (TRUE)
|
|
||||||
WHERE metrics.project_id = %(project_id)s
|
|
||||||
AND metrics.deleted_at ISNULL
|
|
||||||
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
|
||||||
AND metrics.metric_id = %(metric_id)s
|
|
||||||
ORDER BY created_at;""",
|
|
||||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
|
||||||
)
|
|
||||||
cur.execute(query)
|
|
||||||
row = cur.fetchone()
|
|
||||||
return row is not None
@@ -1,25 +0,0 @@
import logging
from typing import Union

import schemas
from chalicelib.core import metrics

logger = logging.getLogger(__name__)


def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors], project_id: int, data: dict):
    supported = {
        schemas.MetricOfWebVitals.COUNT_SESSIONS: metrics.get_processed_sessions,
        schemas.MetricOfWebVitals.AVG_VISITED_PAGES: metrics.get_user_activity_avg_visited_pages,
        schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests,
        schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors,
        schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: metrics.get_domains_errors_4xx,
        schemas.MetricOfErrors.DOMAINS_ERRORS_5XX: metrics.get_domains_errors_5xx,
        schemas.MetricOfErrors.ERRORS_PER_DOMAINS: metrics.get_errors_per_domains,
        schemas.MetricOfErrors.ERRORS_PER_TYPE: metrics.get_errors_per_type,
        schemas.MetricOfErrors.RESOURCES_BY_PARTY: metrics.get_resources_by_party,
        schemas.MetricOfWebVitals.COUNT_USERS: metrics.get_unique_users,
        schemas.MetricOfWebVitals.SPEED_LOCATION: metrics.get_speed_index_location,
    }

    return supported.get(key, lambda *args: None)(project_id=project_id, **data)

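One detail worth noting in the removed helper: the fallback `lambda *args: None` accepts only positional arguments, so an unknown key would raise a TypeError when the fallback is called with `project_id=...` keyword arguments instead of quietly returning None. A kwargs-tolerant fallback would look like this (a sketch, not part of the original file):

def _noop(*args, **kwargs):
    # Accepts any call signature and returns None, so unknown metric keys degrade gracefully.
    return None


supported = {}  # imagine the real key -> handler mapping here
result = supported.get("unknown-key", _noop)(project_id=42, density=7)
assert result is None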
@@ -1,602 +0,0 @@
import json

import schemas
from chalicelib.core import sourcemaps, sessions
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size


def get(error_id, family=False):
    if family:
        return get_batch([error_id])
    with pg_client.PostgresClient() as cur:
        # trying: return only 1 error, without event details
        query = cur.mogrify(
            # "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
            "SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
            {"error_id": error_id})
        cur.execute(query=query)
        result = cur.fetchone()
        if result is not None:
            result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
        return helper.dict_to_camel_case(result)


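Helpers like `helper.dict_to_camel_case` convert snake_case database column names into camelCase keys for the API response. The repo's implementation is not shown in this diff; the following is a generic stand-in that illustrates the idea only:

from typing import Optional


def to_camel_case(snake: str) -> str:
    head, *rest = snake.split("_")
    return head + "".join(part.title() for part in rest)


def dict_to_camel_case(row: Optional[dict]) -> Optional[dict]:
    # None passes through unchanged so callers can feed cur.fetchone() results directly.
    if row is None:
        return None
    return {to_camel_case(k): v for k, v in row.items()}


print(dict_to_camel_case({"error_id": "e1", "stacktrace_parsed_at": 0}))
# {'errorId': 'e1', 'stacktraceParsedAt': 0}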
def get_batch(error_ids):
|
|
||||||
if len(error_ids) == 0:
|
|
||||||
return []
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""
|
|
||||||
WITH RECURSIVE error_family AS (
|
|
||||||
SELECT *
|
|
||||||
FROM public.errors
|
|
||||||
WHERE error_id IN %(error_ids)s
|
|
||||||
UNION
|
|
||||||
SELECT child_errors.*
|
|
||||||
FROM public.errors AS child_errors
|
|
||||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
|
||||||
)
|
|
||||||
SELECT *
|
|
||||||
FROM error_family;""",
|
|
||||||
{"error_ids": tuple(error_ids)})
|
|
||||||
cur.execute(query=query)
|
|
||||||
errors = cur.fetchall()
|
|
||||||
for e in errors:
|
|
||||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
|
||||||
return helper.list_to_camel_case(errors)
|
|
||||||
|
|
||||||
|
|
||||||
def __flatten_sort_key_count_version(data, merge_nested=False):
|
|
||||||
if data is None:
|
|
||||||
return []
|
|
||||||
return sorted(
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": f'{o["name"]}@{v["version"]}',
|
|
||||||
"count": v["count"]
|
|
||||||
} for o in data for v in o["partition"]
|
|
||||||
],
|
|
||||||
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": o["name"],
|
|
||||||
"count": o["count"],
|
|
||||||
} for o in data
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def __process_tags(row):
|
|
||||||
return [
|
|
||||||
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
|
||||||
{"name": "browser.ver",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
|
||||||
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
|
||||||
{"name": "OS.ver",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
|
||||||
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
|
||||||
{"name": "device",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
|
||||||
{"name": "country", "partitions": row.pop("country_partition")}
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def get_details(project_id, error_id, user_id, **data):
|
|
||||||
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
|
||||||
pg_sub_query24.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
|
|
||||||
startTime_arg_name="startDate30",
|
|
||||||
endTime_arg_name="endDate30", project_key="sessions.project_id")
|
|
||||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
|
||||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
|
||||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
|
|
||||||
endTime_arg_name="endDate30", project_key="errors.project_id")
|
|
||||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
|
||||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
|
||||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
|
||||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_err.append("source ='js_exception'")
|
|
||||||
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
|
||||||
pg_sub_query30.append("error_id = %(error_id)s")
|
|
||||||
pg_basic_query = __get_basic_constraints(time_constraint=False)
|
|
||||||
pg_basic_query.append("error_id = %(error_id)s")
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
data["startDate24"] = TimeUTC.now(-1)
|
|
||||||
data["endDate24"] = TimeUTC.now()
|
|
||||||
data["startDate30"] = TimeUTC.now(-30)
|
|
||||||
data["endDate30"] = TimeUTC.now()
|
|
||||||
density24 = int(data.get("density24", 24))
|
|
||||||
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
|
||||||
density30 = int(data.get("density30", 30))
|
|
||||||
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
|
||||||
params = {
|
|
||||||
"startDate24": data['startDate24'],
|
|
||||||
"endDate24": data['endDate24'],
|
|
||||||
"startDate30": data['startDate30'],
|
|
||||||
"endDate30": data['endDate30'],
|
|
||||||
"project_id": project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"step_size24": step_size24,
|
|
||||||
"step_size30": step_size30,
|
|
||||||
"error_id": error_id}
|
|
||||||
|
|
||||||
main_pg_query = f"""\
|
|
||||||
SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
users,
|
|
||||||
sessions,
|
|
||||||
last_occurrence,
|
|
||||||
first_occurrence,
|
|
||||||
last_session_id,
|
|
||||||
browsers_partition,
|
|
||||||
os_partition,
|
|
||||||
device_partition,
|
|
||||||
country_partition,
|
|
||||||
chart24,
|
|
||||||
chart30,
|
|
||||||
custom_tags
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
COUNT(DISTINCT user_id) AS users,
|
|
||||||
COUNT(DISTINCT session_id) AS sessions
|
|
||||||
FROM public.errors
|
|
||||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
|
||||||
GROUP BY error_id, name, message) AS details
|
|
||||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
|
||||||
MIN(timestamp) AS first_occurrence
|
|
||||||
FROM events.errors
|
|
||||||
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT session_id AS last_session_id,
|
|
||||||
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
|
||||||
FROM events.errors
|
|
||||||
LEFT JOIN LATERAL (
|
|
||||||
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
|
||||||
FROM errors_tags
|
|
||||||
WHERE errors_tags.error_id = %(error_id)s
|
|
||||||
AND errors_tags.session_id = errors.session_id
|
|
||||||
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
|
||||||
WHERE error_id = %(error_id)s
|
|
||||||
ORDER BY errors.timestamp DESC
|
|
||||||
LIMIT 1) AS last_session_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_browser AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_browser
|
|
||||||
ORDER BY count DESC) AS count_per_browser_query
|
|
||||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
|
||||||
FROM (SELECT user_browser_version AS version,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_browser = count_per_browser_query.name
|
|
||||||
GROUP BY user_browser_version
|
|
||||||
ORDER BY count DESC) AS version_details
|
|
||||||
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_os AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_os
|
|
||||||
ORDER BY count DESC) AS count_per_os_details
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
|
||||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_os = count_per_os_details.name
|
|
||||||
GROUP BY user_os_version
|
|
||||||
ORDER BY count DESC) AS count_per_version_details
|
|
||||||
GROUP BY count_per_os_details.name ) AS os_version_details
|
|
||||||
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_device_type AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_device_type
|
|
||||||
ORDER BY count DESC) AS count_per_device_details
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
|
||||||
FROM (SELECT CASE
|
|
||||||
WHEN user_device = '' OR user_device ISNULL
|
|
||||||
THEN 'unknown'
|
|
||||||
ELSE user_device END AS version,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_device_type = count_per_device_details.name
|
|
||||||
GROUP BY user_device
|
|
||||||
ORDER BY count DESC) AS count_per_device_v_details
|
|
||||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
|
||||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
|
||||||
FROM (SELECT user_country AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_country
|
|
||||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query24)}
|
|
||||||
) AS chart_details ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
|
||||||
ON (TRUE)
|
|
||||||
GROUP BY timestamp
|
|
||||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
|
||||||
"""
|
|
||||||
|
|
||||||
# print("--------------------")
|
|
||||||
# print(cur.mogrify(main_pg_query, params))
|
|
||||||
# print("--------------------")
|
|
||||||
cur.execute(cur.mogrify(main_pg_query, params))
|
|
||||||
row = cur.fetchone()
|
|
||||||
if row is None:
|
|
||||||
return {"errors": ["error not found"]}
|
|
||||||
row["tags"] = __process_tags(row)
|
|
||||||
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT error_id, status, session_id, start_ts,
|
|
||||||
parent_error_id,session_id, user_anonymous_id,
|
|
||||||
user_id, user_uuid, user_browser, user_browser_version,
|
|
||||||
user_os, user_os_version, user_device, payload,
|
|
||||||
FALSE AS favorite,
|
|
||||||
True AS viewed
|
|
||||||
FROM public.errors AS pe
|
|
||||||
INNER JOIN events.errors AS ee USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE pe.project_id = %(project_id)s
|
|
||||||
AND error_id = %(error_id)s
|
|
||||||
ORDER BY start_ts DESC
|
|
||||||
LIMIT 1;""",
|
|
||||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
|
||||||
cur.execute(query=query)
|
|
||||||
status = cur.fetchone()
|
|
||||||
|
|
||||||
if status is not None:
|
|
||||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
|
||||||
row["status"] = status.pop("status")
|
|
||||||
row["parent_error_id"] = status.pop("parent_error_id")
|
|
||||||
row["favorite"] = status.pop("favorite")
|
|
||||||
row["viewed"] = status.pop("viewed")
|
|
||||||
row["last_hydrated_session"] = status
|
|
||||||
else:
|
|
||||||
row["stack"] = []
|
|
||||||
row["last_hydrated_session"] = None
|
|
||||||
row["status"] = "untracked"
|
|
||||||
row["parent_error_id"] = None
|
|
||||||
row["favorite"] = False
|
|
||||||
row["viewed"] = False
|
|
||||||
return {"data": helper.dict_to_camel_case(row)}
|
|
||||||
|
|
||||||
|
|
||||||
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
|
||||||
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
|
|
||||||
project_key="project_id"):
|
|
||||||
if project_key is None:
|
|
||||||
ch_sub_query = []
|
|
||||||
else:
|
|
||||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
|
||||||
if time_constraint:
|
|
||||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
|
||||||
f"timestamp < %({endTime_arg_name})s"]
|
|
||||||
if chart:
|
|
||||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
|
||||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
|
||||||
if platform == schemas.PlatformType.MOBILE:
|
|
||||||
ch_sub_query.append("user_device_type = 'mobile'")
|
|
||||||
elif platform == schemas.PlatformType.DESKTOP:
|
|
||||||
ch_sub_query.append("user_device_type = 'desktop'")
|
|
||||||
return ch_sub_query
|
|
||||||
|
|
||||||
|
|
||||||
def __get_sort_key(key):
|
|
||||||
return {
|
|
||||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
|
||||||
schemas.ErrorSort.USERS_COUNT: "users",
|
|
||||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
|
||||||
}.get(key, 'max_datetime')
|
|
||||||
|
|
||||||
|
|
||||||
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
|
|
||||||
empty_response = {
|
|
||||||
'total': 0,
|
|
||||||
'errors': []
|
|
||||||
}
|
|
||||||
|
|
||||||
platform = None
|
|
||||||
for f in data.filters:
|
|
||||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
|
||||||
platform = f.value[0]
|
|
||||||
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
|
|
||||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
|
||||||
"pe.project_id=%(project_id)s"]
|
|
||||||
# To ignore Script error
|
|
||||||
pg_sub_query.append("pe.message!='Script error.'")
|
|
||||||
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
|
|
||||||
if platform:
|
|
||||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
|
||||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
|
||||||
statuses = []
|
|
||||||
error_ids = None
|
|
||||||
if data.startTimestamp is None:
|
|
||||||
data.startTimestamp = TimeUTC.now(-30)
|
|
||||||
if data.endTimestamp is None:
|
|
||||||
data.endTimestamp = TimeUTC.now(1)
|
|
||||||
if len(data.events) > 0 or len(data.filters) > 0:
|
|
||||||
print("-- searching for sessions before errors")
|
|
||||||
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
|
|
||||||
error_status=data.status)
|
|
||||||
if len(statuses) == 0:
|
|
||||||
return empty_response
|
|
||||||
error_ids = [e["errorId"] for e in statuses]
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
|
||||||
sort = __get_sort_key('datetime')
|
|
||||||
if data.sort is not None:
|
|
||||||
sort = __get_sort_key(data.sort)
|
|
||||||
order = schemas.SortOrderType.DESC
|
|
||||||
if data.order is not None:
|
|
||||||
order = data.order
|
|
||||||
extra_join = ""
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"startDate": data.startTimestamp,
|
|
||||||
"endDate": data.endTimestamp,
|
|
||||||
"project_id": project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"step_size": step_size}
|
|
||||||
if data.status != schemas.ErrorStatus.ALL:
|
|
||||||
pg_sub_query.append("status = %(error_status)s")
|
|
||||||
params["error_status"] = data.status
|
|
||||||
if data.limit is not None and data.page is not None:
|
|
||||||
params["errors_offset"] = (data.page - 1) * data.limit
|
|
||||||
params["errors_limit"] = data.limit
|
|
||||||
else:
|
|
||||||
params["errors_offset"] = 0
|
|
||||||
params["errors_limit"] = 200
|
|
||||||
|
|
||||||
if error_ids is not None:
|
|
||||||
params["error_ids"] = tuple(error_ids)
|
|
||||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
|
||||||
# if data.bookmarked:
|
|
||||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
|
||||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
|
||||||
if data.query is not None and len(data.query) > 0:
|
|
||||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
|
||||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
|
||||||
op=schemas.SearchEventOperator.CONTAINS)
|
|
||||||
|
|
||||||
main_pg_query = f"""SELECT full_count,
|
|
||||||
error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
users,
|
|
||||||
sessions,
|
|
||||||
last_occurrence,
|
|
||||||
first_occurrence,
|
|
||||||
chart
|
|
||||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
|
||||||
COUNT(DISTINCT session_id) AS sessions,
|
|
||||||
MAX(timestamp) AS max_datetime,
|
|
||||||
MIN(timestamp) AS min_datetime
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS pe USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
{extra_join}
|
|
||||||
WHERE {" AND ".join(pg_sub_query)}
|
|
||||||
GROUP BY error_id, name, message
|
|
||||||
ORDER BY {sort} {order}) AS details
|
|
||||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
|
||||||
) AS details
|
|
||||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
|
||||||
MIN(timestamp) AS first_occurrence
|
|
||||||
FROM events.errors
|
|
||||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors
|
|
||||||
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sessions ON (TRUE)
|
|
||||||
GROUP BY timestamp
|
|
||||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
|
||||||
|
|
||||||
# print("--------------------")
|
|
||||||
# print(cur.mogrify(main_pg_query, params))
|
|
||||||
# print("--------------------")
|
|
||||||
|
|
||||||
cur.execute(cur.mogrify(main_pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
|
||||||
|
|
||||||
if total == 0:
|
|
||||||
rows = []
|
|
||||||
else:
|
|
||||||
if len(statuses) == 0:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""SELECT error_id,
|
|
||||||
COALESCE((SELECT TRUE
|
|
||||||
FROM public.user_viewed_errors AS ve
|
|
||||||
WHERE errors.error_id = ve.error_id
|
|
||||||
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
|
|
||||||
FROM public.errors
|
|
||||||
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
|
||||||
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
|
||||||
"user_id": user_id})
|
|
||||||
cur.execute(query=query)
|
|
||||||
statuses = helper.list_to_camel_case(cur.fetchall())
|
|
||||||
statuses = {
|
|
||||||
s["errorId"]: s for s in statuses
|
|
||||||
}
|
|
||||||
|
|
||||||
for r in rows:
|
|
||||||
r.pop("full_count")
|
|
||||||
if r["error_id"] in statuses:
|
|
||||||
r["viewed"] = statuses[r["error_id"]]["viewed"]
|
|
||||||
else:
|
|
||||||
r["viewed"] = False
|
|
||||||
|
|
||||||
return {
|
|
||||||
'total': total,
|
|
||||||
'errors': helper.list_to_camel_case(rows)
|
|
||||||
}
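# Note (editor sketch, not part of the original diff): on success this search returns a dict of the
# shape {"total": <full_count>, "errors": [<camelCase error rows, each carrying a "chart" time
# series and a "viewed" flag>]}, and {"total": 0, "errors": []} when nothing matches.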
|
|
||||||
|
|
||||||
|
|
||||||
def __save_stacktrace(error_id, data):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
               WHERE error_id = %(error_id)s;""",
            {"error_id": error_id, "data": json.dumps(data)})
        cur.execute(query=query)


def get_trace(project_id, error_id):
    error = get(error_id=error_id, family=False)
    if error is None:
        return {"errors": ["error not found"]}
    if error.get("source", "") != "js_exception":
        return {"errors": ["this source of errors doesn't have a sourcemap"]}
    if error.get("payload") is None:
        return {"errors": ["null payload"]}
    if error.get("stacktrace") is not None:
        return {"sourcemapUploaded": True,
                "trace": error.get("stacktrace"),
                "preparsed": True}
    trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
    if all_exists:
        __save_stacktrace(error_id=error_id, data=trace)
    return {"sourcemapUploaded": all_exists,
            "trace": trace,
            "preparsed": False}


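# Illustrative sketch (assumption, not part of the original file): get_trace() either reports a
# problem ({"errors": [...]}) or returns the parsed stack trace, e.g.
#     get_trace(project_id=1, error_id="abc")   # the IDs here are hypothetical
#     # -> {"sourcemapUploaded": True, "trace": [...], "preparsed": True}   # cached in public.errors
#     # -> {"sourcemapUploaded": False, "trace": [...], "preparsed": False} # sourcemap not uploaded
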
def get_sessions(start_date, end_date, project_id, user_id, error_id):
    extra_constraints = ["s.project_id = %(project_id)s",
                         "s.start_ts >= %(startDate)s",
                         "s.start_ts <= %(endDate)s",
                         "e.error_id = %(error_id)s"]
    if start_date is None:
        start_date = TimeUTC.now(-7)
    if end_date is None:
        end_date = TimeUTC.now()

    params = {
        "startDate": start_date,
        "endDate": end_date,
        "project_id": project_id,
        "userId": user_id,
        "error_id": error_id}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT s.project_id,
                       s.session_id::text AS session_id,
                       s.user_uuid,
                       s.user_id,
                       s.user_agent,
                       s.user_os,
                       s.user_browser,
                       s.user_device,
                       s.user_country,
                       s.start_ts,
                       s.duration,
                       s.events_count,
                       s.pages_count,
                       s.errors_count,
                       s.issue_types,
                       COALESCE((SELECT TRUE
                                 FROM public.user_favorite_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
                       COALESCE((SELECT TRUE
                                 FROM public.user_viewed_sessions AS fs
                                 WHERE s.session_id = fs.session_id
                                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
                FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
                WHERE {" AND ".join(extra_constraints)}
                ORDER BY s.start_ts DESC;""",
            params)
        cur.execute(query=query)
        sessions_list = []
        total = cur.rowcount
        row = cur.fetchone()
        while row is not None and len(sessions_list) < 100:
            sessions_list.append(row)
            row = cur.fetchone()

    return {
        'total': total,
        'sessions': helper.list_to_camel_case(sessions_list)
    }


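# Note (editor sketch, not part of the original diff): get_sessions() reports cur.rowcount as
# "total" but appends at most 100 rows, so callers may see a "total" larger than the returned
# "sessions" list.
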
ACTION_STATE = {
    "unsolve": 'unresolved',
    "solve": 'resolved',
    "ignore": 'ignored'
}


def change_state(project_id, user_id, error_id, action):
    errors = get(error_id, family=True)
    print(len(errors))
    status = ACTION_STATE.get(action)
    if errors is None or len(errors) == 0:
        return {"errors": ["error not found"]}
    if errors[0]["status"] == status:
        return {"errors": [f"error is already {status}"]}

    if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
        return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}

    params = {
        "userId": user_id,
        "error_ids": tuple([e["errorId"] for e in errors]),
        "status": status}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET status = %(status)s
               WHERE error_id IN %(error_ids)s
               RETURNING status""",
            params)
        cur.execute(query=query)
        row = cur.fetchone()
    if row is not None:
        for e in errors:
            e["status"] = row["status"]
    return {"data": errors}

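# Illustrative sketch (assumption, not part of the original file): change_state() maps the action
# to a status via ACTION_STATE and updates the whole error family, e.g.
#     change_state(project_id=1, user_id=7, error_id="abc", action="solve")   # hypothetical IDs
#     # -> {"data": [{..., "status": "resolved", ...}, ...]}
# A 'resolved' error cannot be moved directly to 'ignored'; that transition is rejected.
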
api/chalicelib/core/errors/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

from . import errors_pg as errors_legacy

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    logger.info(">>> Using experimental error search")
    from . import errors_ch as errors
else:
    from . import errors_pg as errors
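
# Usage sketch (assumption, not part of the original file): with EXP_ERRORS_SEARCH=true in the
# environment, `from chalicelib.core.errors import errors` resolves to the ClickHouse-backed
# errors_ch module; otherwise it falls back to the PostgreSQL implementation. The PostgreSQL
# module stays importable in both cases as errors_legacy.
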
api/chalicelib/core/errors/errors_ch.py (new file, 409 lines)
@@ -0,0 +1,409 @@
import schemas
from chalicelib.core import metadata
from chalicelib.core.errors import errors_legacy
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC


def _multiple_values(values, value_key="value"):
    query_values = {}
    if values is not None and isinstance(values, list):
        for i in range(len(values)):
            k = f"{value_key}_{i}"
            query_values[k] = values[i]
    return query_values


def __get_sql_operator(op: schemas.SearchEventOperator):
    return {
        schemas.SearchEventOperator.IS: "=",
        schemas.SearchEventOperator.IS_ANY: "IN",
        schemas.SearchEventOperator.ON: "=",
        schemas.SearchEventOperator.ON_ANY: "IN",
        schemas.SearchEventOperator.IS_NOT: "!=",
        schemas.SearchEventOperator.NOT_ON: "!=",
        schemas.SearchEventOperator.CONTAINS: "ILIKE",
        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
    }.get(op, "=")


def _isAny_opreator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]


def _isUndefined_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_UNDEFINED]


def __is_negation_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_NOT,
                  schemas.SearchEventOperator.NOT_ON,
                  schemas.SearchEventOperator.NOT_CONTAINS]


def _multiple_conditions(condition, values, value_key="value", is_not=False):
    query = []
    for i in range(len(values)):
        k = f"{value_key}_{i}"
        query.append(condition.replace(value_key, k))
    return "(" + (" AND " if is_not else " OR ").join(query) + ")"


def get(error_id, family=False):
    return errors_legacy.get(error_id=error_id, family=family)


def get_batch(error_ids):
    return errors_legacy.get_batch(error_ids=error_ids)


def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
                                   endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                                   table_name=None):
    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
    # if platform == schemas.PlatformType.MOBILE:
    #     ch_sub_query.append("user_device_type = 'mobile'")
    # elif platform == schemas.PlatformType.DESKTOP:
    #     ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_sort_key(key):
    return {
        schemas.ErrorSort.OCCURRENCE: "max_datetime",
        schemas.ErrorSort.USERS_COUNT: "users",
        schemas.ErrorSort.SESSIONS_COUNT: "sessions"
    }.get(key, 'max_datetime')


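# Illustrative sketch (assumption, not part of the original file): the helpers above expand a
# multi-value filter into named parameters plus an OR-joined (AND-joined when negated) condition:
#     _multiple_values(["Chrome", "Firefox"], value_key="f_value0")
#     # -> {"f_value0_0": "Chrome", "f_value0_1": "Firefox"}
#     _multiple_conditions("s.user_browser = %(f_value0)s", ["Chrome", "Firefox"], value_key="f_value0")
#     # -> "(s.user_browser = %(f_value0_0)s OR s.user_browser = %(f_value0_1)s)"
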
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
||||||
|
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
|
||||||
|
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)
|
||||||
|
|
||||||
|
platform = None
|
||||||
|
for f in data.filters:
|
||||||
|
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||||
|
platform = f.value[0]
|
||||||
|
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
|
||||||
|
# ignore platform for errors table
|
||||||
|
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
|
||||||
|
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
|
||||||
|
|
||||||
|
# To ignore Script error
|
||||||
|
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
|
||||||
|
error_ids = None
|
||||||
|
|
||||||
|
if data.startTimestamp is None:
|
||||||
|
data.startTimestamp = TimeUTC.now(-7)
|
||||||
|
if data.endTimestamp is None:
|
||||||
|
data.endTimestamp = TimeUTC.now(1)
|
||||||
|
|
||||||
|
subquery_part = ""
|
||||||
|
params = {}
|
||||||
|
if len(data.events) > 0:
|
||||||
|
errors_condition_count = 0
|
||||||
|
for i, e in enumerate(data.events):
|
||||||
|
if e.type == schemas.EventType.ERROR:
|
||||||
|
errors_condition_count += 1
|
||||||
|
is_any = _isAny_opreator(e.operator)
|
||||||
|
op = __get_sql_operator(e.operator)
|
||||||
|
e_k = f"e_value{i}"
|
||||||
|
params = {**params, **_multiple_values(e.value, value_key=e_k)}
|
||||||
|
if not is_any and len(e.value) > 0 and e.value[1] not in [None, "*", ""]:
|
||||||
|
ch_sub_query.append(
|
||||||
|
_multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
|
||||||
|
e.value, value_key=e_k))
|
||||||
|
if len(data.events) > errors_condition_count:
|
||||||
|
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
|
||||||
|
errors_only=True,
|
||||||
|
project_id=project.project_id,
|
||||||
|
user_id=user_id,
|
||||||
|
issue=None,
|
||||||
|
favorite_only=False)
|
||||||
|
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
|
||||||
|
params = {**params, **subquery_part_args}
|
||||||
|
if len(data.filters) > 0:
|
||||||
|
meta_keys = None
|
||||||
|
# to reduce include a sub-query of sessions inside events query, in order to reduce the selected data
|
||||||
|
for i, f in enumerate(data.filters):
|
||||||
|
if not isinstance(f.value, list):
|
||||||
|
f.value = [f.value]
|
||||||
|
filter_type = f.type
|
||||||
|
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||||
|
f_k = f"f_value{i}"
|
||||||
|
params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
|
||||||
|
op = __get_sql_operator(f.operator) \
|
||||||
|
if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
|
||||||
|
is_any = _isAny_opreator(f.operator)
|
||||||
|
is_undefined = _isUndefined_operator(f.operator)
|
||||||
|
if not is_any and not is_undefined and len(f.value) == 0:
|
||||||
|
continue
|
||||||
|
is_not = False
|
||||||
|
if __is_negation_operator(f.operator):
|
||||||
|
is_not = True
|
||||||
|
if filter_type == schemas.FilterType.USER_BROWSER:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_browser)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_os)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_device)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_country)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.UTM_SOURCE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.utm_source)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.utm_source)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.utm_medium)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.utm_campaign)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type == schemas.FilterType.DURATION:
|
||||||
|
if len(f.value) > 0 and f.value[0] is not None:
|
||||||
|
ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
|
||||||
|
params["minDuration"] = f.value[0]
|
||||||
|
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
|
||||||
|
ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
|
||||||
|
params["maxDuration"] = f.value[1]
|
||||||
|
|
||||||
|
elif filter_type == schemas.FilterType.REFERRER:
|
||||||
|
# extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
|
||||||
|
if is_any:
|
||||||
|
referrer_constraint = 'isNotNull(s.base_referrer)'
|
||||||
|
else:
|
||||||
|
referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
|
||||||
|
is_not=is_not, value_key=f_k)
|
||||||
|
elif filter_type == schemas.FilterType.METADATA:
|
||||||
|
# get metadata list only if you need it
|
||||||
|
if meta_keys is None:
|
||||||
|
meta_keys = metadata.get(project_id=project.project_id)
|
||||||
|
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
||||||
|
if f.source in meta_keys.keys():
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(
|
||||||
|
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
|
||||||
|
f.value, is_not=is_not, value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_id)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.user_id)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
|
||||||
|
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
|
||||||
|
is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
|
||||||
|
if is_any:
|
||||||
|
ch_sessions_sub_query.append('isNotNull(s.rev_id)')
|
||||||
|
elif is_undefined:
|
||||||
|
ch_sessions_sub_query.append('isNull(s.rev_id)')
|
||||||
|
else:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
elif filter_type == schemas.FilterType.PLATFORM:
|
||||||
|
# op = __get_sql_operator(f.operator)
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
# elif filter_type == schemas.FilterType.issue:
|
||||||
|
# if is_any:
|
||||||
|
# ch_sessions_sub_query.append("notEmpty(s.issue_types)")
|
||||||
|
# else:
|
||||||
|
# ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
|
||||||
|
# # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
|
||||||
|
# # value_key=f_k))
|
||||||
|
#
|
||||||
|
# if is_not:
|
||||||
|
# extra_constraints[-1] = f"not({extra_constraints[-1]})"
|
||||||
|
# ss_constraints[-1] = f"not({ss_constraints[-1]})"
|
||||||
|
elif filter_type == schemas.FilterType.EVENTS_COUNT:
|
||||||
|
ch_sessions_sub_query.append(
|
||||||
|
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
|
||||||
|
value_key=f_k))
|
||||||
|
|
||||||
|
with ch_client.ClickHouseClient() as ch:
|
||||||
|
step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
|
||||||
|
sort = __get_sort_key('datetime')
|
||||||
|
if data.sort is not None:
|
||||||
|
sort = __get_sort_key(data.sort)
|
||||||
|
order = "DESC"
|
||||||
|
if data.order is not None:
|
||||||
|
order = data.order
|
||||||
|
params = {
|
||||||
|
**params,
|
||||||
|
"startDate": data.startTimestamp,
|
||||||
|
"endDate": data.endTimestamp,
|
||||||
|
"project_id": project.project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"step_size": step_size}
|
||||||
|
if data.limit is not None and data.page is not None:
|
||||||
|
params["errors_offset"] = (data.page - 1) * data.limit
|
||||||
|
params["errors_limit"] = data.limit
|
||||||
|
else:
|
||||||
|
params["errors_offset"] = 0
|
||||||
|
params["errors_limit"] = 200
|
||||||
|
# if data.bookmarked:
|
||||||
|
# cur.execute(cur.mogrify(f"""SELECT error_id
|
||||||
|
# FROM public.user_favorite_errors
|
||||||
|
# WHERE user_id = %(userId)s
|
||||||
|
# {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
|
||||||
|
# {"userId": user_id, "error_ids": tuple(error_ids or [])}))
|
||||||
|
# error_ids = cur.fetchall()
|
||||||
|
# if len(error_ids) == 0:
|
||||||
|
# return empty_response
|
||||||
|
# error_ids = [e["error_id"] for e in error_ids]
|
||||||
|
|
||||||
|
if error_ids is not None:
|
||||||
|
params["error_ids"] = tuple(error_ids)
|
||||||
|
ch_sub_query.append("error_id IN %(error_ids)s")
|
||||||
|
|
||||||
|
main_ch_query = f"""\
|
||||||
|
SELECT details.error_id as error_id,
|
||||||
|
name, message, users, total,
|
||||||
|
sessions, last_occurrence, first_occurrence, chart
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
JSONExtractString(toString(`$properties`), 'name') AS name,
|
||||||
|
JSONExtractString(toString(`$properties`), 'message') AS message,
|
||||||
|
COUNT(DISTINCT user_id) AS users,
|
||||||
|
COUNT(DISTINCT events.session_id) AS sessions,
|
||||||
|
MAX(created_at) AS max_datetime,
|
||||||
|
MIN(created_at) AS min_datetime,
|
||||||
|
COUNT(DISTINCT error_id)
|
||||||
|
OVER() AS total
|
||||||
|
FROM {MAIN_EVENTS_TABLE} AS events
|
||||||
|
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
|
||||||
|
FROM {MAIN_SESSIONS_TABLE} AS s
|
||||||
|
{subquery_part}
|
||||||
|
WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
|
||||||
|
ON (events.session_id = sessions.session_id)
|
||||||
|
WHERE {" AND ".join(ch_sub_query)}
|
||||||
|
GROUP BY error_id, name, message
|
||||||
|
ORDER BY {sort} {order}
|
||||||
|
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
|
||||||
|
INNER JOIN (SELECT error_id,
|
||||||
|
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
|
||||||
|
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
|
||||||
|
FROM {MAIN_EVENTS_TABLE}
|
||||||
|
WHERE project_id=%(project_id)s
|
||||||
|
AND `$event_name`='ERROR'
|
||||||
|
GROUP BY error_id) AS time_details
|
||||||
|
ON details.error_id=time_details.error_id
|
||||||
|
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
gs.generate_series AS timestamp,
|
||||||
|
COUNT(DISTINCT session_id) AS count
|
||||||
|
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
|
||||||
|
LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
|
||||||
|
WHERE {" AND ".join(ch_sub_query)}
|
||||||
|
AND created_at >= toDateTime(timestamp / 1000)
|
||||||
|
AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
|
||||||
|
GROUP BY error_id, timestamp
|
||||||
|
ORDER BY timestamp) AS sub_table
|
||||||
|
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
|
||||||
|
|
||||||
|
# print("------------")
|
||||||
|
# print(ch.format(main_ch_query, params))
|
||||||
|
# print("------------")
|
||||||
|
query = ch.format(query=main_ch_query, parameters=params)
|
||||||
|
|
||||||
|
rows = ch.execute(query=query)
|
||||||
|
total = rows[0]["total"] if len(rows) > 0 else 0
|
||||||
|
|
||||||
|
for r in rows:
|
||||||
|
r["chart"] = list(r["chart"])
|
||||||
|
for i in range(len(r["chart"])):
|
||||||
|
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
|
||||||
|
|
||||||
|
return {
|
||||||
|
'total': total,
|
||||||
|
'errors': helper.list_to_camel_case(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_trace(project_id, error_id):
|
||||||
|
return errors_legacy.get_trace(project_id=project_id, error_id=error_id)
|
||||||
|
|
||||||
|
|
||||||
|
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||||
|
return errors_legacy.get_sessions(start_date=start_date,
|
||||||
|
end_date=end_date,
|
||||||
|
project_id=project_id,
|
||||||
|
user_id=user_id,
|
||||||
|
error_id=error_id)
|
||||||
248
api/chalicelib/core/errors/errors_details.py
Normal file
248
api/chalicelib/core/errors/errors_details.py
Normal file
|
|
@ -0,0 +1,248 @@
from chalicelib.core.errors.modules import errors_helper

from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size


def __flatten_sort_key_count_version(data, merge_nested=False):
    if data is None:
        return []
    return sorted(
        [
            {
                "name": f'{o["name"]}@{v["version"]}',
                "count": v["count"]
            } for o in data for v in o["partition"]
        ],
        key=lambda o: o["count"], reverse=True) if merge_nested else \
        [
            {
                "name": o["name"],
                "count": o["count"],
            } for o in data
        ]


def __process_tags(row):
    return [
        {"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
        {"name": "browser.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
        {"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
        {"name": "OS.ver",
         "partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
        {"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
        {"name": "device",
         "partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
        {"name": "country", "partitions": row.pop("country_partition")}
    ]


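# Illustrative sketch (assumption, not part of the original file): given one aggregated row such as
#     [{"name": "Chrome", "count": 10, "partition": [{"version": "120", "count": 7}, {"version": "119", "count": 3}]}]
# __flatten_sort_key_count_version() returns [{"name": "Chrome", "count": 10}] by default, and with
# merge_nested=True it returns [{"name": "Chrome@120", "count": 7}, {"name": "Chrome@119", "count": 3}]
# sorted by count; __process_tags() uses both forms to build the "browser" / "browser.ver" style tags.
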
def get_details(project_id, error_id, user_id, **data):
|
||||||
|
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||||
|
step_size_name="step_size24")
|
||||||
|
pg_sub_query24.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||||
|
startTime_arg_name="startDate30",
|
||||||
|
endTime_arg_name="endDate30",
|
||||||
|
project_key="sessions.project_id")
|
||||||
|
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||||
|
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||||
|
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||||
|
startTime_arg_name="startDate30",
|
||||||
|
endTime_arg_name="endDate30",
|
||||||
|
project_key="errors.project_id")
|
||||||
|
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||||
|
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||||
|
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||||
|
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_err.append("source ='js_exception'")
|
||||||
|
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||||
|
step_size_name="step_size30")
|
||||||
|
pg_sub_query30.append("error_id = %(error_id)s")
|
||||||
|
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
|
||||||
|
pg_basic_query.append("error_id = %(error_id)s")
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
data["startDate24"] = TimeUTC.now(-1)
|
||||||
|
data["endDate24"] = TimeUTC.now()
|
||||||
|
data["startDate30"] = TimeUTC.now(-30)
|
||||||
|
data["endDate30"] = TimeUTC.now()
|
||||||
|
density24 = int(data.get("density24", 24))
|
||||||
|
step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
||||||
|
density30 = int(data.get("density30", 30))
|
||||||
|
step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
||||||
|
params = {
|
||||||
|
"startDate24": data['startDate24'],
|
||||||
|
"endDate24": data['endDate24'],
|
||||||
|
"startDate30": data['startDate30'],
|
||||||
|
"endDate30": data['endDate30'],
|
||||||
|
"project_id": project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"step_size24": step_size24,
|
||||||
|
"step_size30": step_size30,
|
||||||
|
"error_id": error_id}
|
||||||
|
|
||||||
|
main_pg_query = f"""\
|
||||||
|
SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
users,
|
||||||
|
sessions,
|
||||||
|
last_occurrence,
|
||||||
|
first_occurrence,
|
||||||
|
last_session_id,
|
||||||
|
browsers_partition,
|
||||||
|
os_partition,
|
||||||
|
device_partition,
|
||||||
|
country_partition,
|
||||||
|
chart24,
|
||||||
|
chart30
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
COUNT(DISTINCT user_id) AS users,
|
||||||
|
COUNT(DISTINCT session_id) AS sessions
|
||||||
|
FROM public.errors
|
||||||
|
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||||
|
GROUP BY error_id, name, message) AS details
|
||||||
|
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||||
|
MIN(timestamp) AS first_occurrence
|
||||||
|
FROM events.errors
|
||||||
|
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT session_id AS last_session_id
|
||||||
|
FROM events.errors
|
||||||
|
WHERE error_id = %(error_id)s
|
||||||
|
ORDER BY errors.timestamp DESC
|
||||||
|
LIMIT 1) AS last_session_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_browser AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_browser
|
||||||
|
ORDER BY count DESC) AS count_per_browser_query
|
||||||
|
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||||
|
FROM (SELECT user_browser_version AS version,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_browser = count_per_browser_query.name
|
||||||
|
GROUP BY user_browser_version
|
||||||
|
ORDER BY count DESC) AS version_details
|
||||||
|
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_os AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_os
|
||||||
|
ORDER BY count DESC) AS count_per_os_details
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||||
|
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_os = count_per_os_details.name
|
||||||
|
GROUP BY user_os_version
|
||||||
|
ORDER BY count DESC) AS count_per_version_details
|
||||||
|
GROUP BY count_per_os_details.name ) AS os_version_details
|
||||||
|
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_device_type AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_device_type
|
||||||
|
ORDER BY count DESC) AS count_per_device_details
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||||
|
FROM (SELECT CASE
|
||||||
|
WHEN user_device = '' OR user_device ISNULL
|
||||||
|
THEN 'unknown'
|
||||||
|
ELSE user_device END AS version,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_device_type = count_per_device_details.name
|
||||||
|
GROUP BY user_device
|
||||||
|
ORDER BY count DESC) AS count_per_device_v_details
|
||||||
|
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||||
|
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||||
|
FROM (SELECT user_country AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_country
|
||||||
|
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query24)}
|
||||||
|
) AS chart_details ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||||
|
ON (TRUE)
|
||||||
|
GROUP BY timestamp
|
||||||
|
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||||
|
"""
|
||||||
|
|
||||||
|
# print("--------------------")
|
||||||
|
# print(cur.mogrify(main_pg_query, params))
|
||||||
|
# print("--------------------")
|
||||||
|
cur.execute(cur.mogrify(main_pg_query, params))
|
||||||
|
row = cur.fetchone()
|
||||||
|
if row is None:
|
||||||
|
return {"errors": ["error not found"]}
|
||||||
|
row["tags"] = __process_tags(row)
|
||||||
|
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT error_id, status, session_id, start_ts,
|
||||||
|
parent_error_id,session_id, user_anonymous_id,
|
||||||
|
user_id, user_uuid, user_browser, user_browser_version,
|
||||||
|
user_os, user_os_version, user_device, payload,
|
||||||
|
FALSE AS favorite,
|
||||||
|
True AS viewed
|
||||||
|
FROM public.errors AS pe
|
||||||
|
INNER JOIN events.errors AS ee USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE pe.project_id = %(project_id)s
|
||||||
|
AND error_id = %(error_id)s
|
||||||
|
ORDER BY start_ts DESC
|
||||||
|
LIMIT 1;""",
|
||||||
|
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||||
|
cur.execute(query=query)
|
||||||
|
status = cur.fetchone()
|
||||||
|
|
||||||
|
if status is not None:
|
||||||
|
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||||
|
row["status"] = status.pop("status")
|
||||||
|
row["parent_error_id"] = status.pop("parent_error_id")
|
||||||
|
row["favorite"] = status.pop("favorite")
|
||||||
|
row["viewed"] = status.pop("viewed")
|
||||||
|
row["last_hydrated_session"] = status
|
||||||
|
else:
|
||||||
|
row["stack"] = []
|
||||||
|
row["last_hydrated_session"] = None
|
||||||
|
row["status"] = "untracked"
|
||||||
|
row["parent_error_id"] = None
|
||||||
|
row["favorite"] = False
|
||||||
|
row["viewed"] = False
|
||||||
|
return {"data": helper.dict_to_camel_case(row)}
|
||||||
api/chalicelib/core/errors/errors_pg.py (new file, 294 lines)
@@ -0,0 +1,294 @@
import json
from typing import List

import schemas
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.sessions import sessions_search
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size


def get(error_id, family=False) -> dict | List[dict]:
    if family:
        return get_batch([error_id])
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT *
               FROM public.errors
               WHERE error_id = %(error_id)s
               LIMIT 1;""",
            {"error_id": error_id})
        cur.execute(query=query)
        result = cur.fetchone()
        if result is not None:
            result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
        return helper.dict_to_camel_case(result)


def get_batch(error_ids):
    if len(error_ids) == 0:
        return []
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """
            WITH RECURSIVE error_family AS (
                SELECT *
                FROM public.errors
                WHERE error_id IN %(error_ids)s
                UNION
                SELECT child_errors.*
                FROM public.errors AS child_errors
                         INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
            )
            SELECT *
            FROM error_family;""",
            {"error_ids": tuple(error_ids)})
        cur.execute(query=query)
        errors = cur.fetchall()
        for e in errors:
            e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
        return helper.list_to_camel_case(errors)


def __get_sort_key(key):
    return {
        schemas.ErrorSort.OCCURRENCE: "max_datetime",
        schemas.ErrorSort.USERS_COUNT: "users",
        schemas.ErrorSort.SESSIONS_COUNT: "sessions"
    }.get(key, 'max_datetime')


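# Note (editor sketch, not part of the original diff): get(error_id, family=True) delegates to
# get_batch(), whose recursive CTE follows parent_error_id links in both directions, so the result
# contains the requested error together with its parent and child errors, all camelCased.
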
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
||||||
|
empty_response = {
|
||||||
|
'total': 0,
|
||||||
|
'errors': []
|
||||||
|
}
|
||||||
|
|
||||||
|
platform = None
|
||||||
|
for f in data.filters:
|
||||||
|
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||||
|
platform = f.value[0]
|
||||||
|
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
|
||||||
|
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||||
|
"pe.project_id=%(project_id)s"]
|
||||||
|
# To ignore Script error
|
||||||
|
pg_sub_query.append("pe.message!='Script error.'")
|
||||||
|
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
|
||||||
|
project_key=None)
|
||||||
|
if platform:
|
||||||
|
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||||
|
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||||
|
statuses = []
|
||||||
|
error_ids = None
|
||||||
|
if data.startTimestamp is None:
|
||||||
|
data.startTimestamp = TimeUTC.now(-30)
|
||||||
|
if data.endTimestamp is None:
|
||||||
|
data.endTimestamp = TimeUTC.now(1)
|
||||||
|
if len(data.events) > 0 or len(data.filters) > 0:
|
||||||
|
print("-- searching for sessions before errors")
|
||||||
|
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
|
||||||
|
error_status=data.status)
|
||||||
|
if len(statuses) == 0:
|
||||||
|
return empty_response
|
||||||
|
error_ids = [e["errorId"] for e in statuses]
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
||||||
|
sort = __get_sort_key('datetime')
|
||||||
|
if data.sort is not None:
|
||||||
|
sort = __get_sort_key(data.sort)
|
||||||
|
order = schemas.SortOrderType.DESC
|
||||||
|
if data.order is not None:
|
||||||
|
order = data.order
|
||||||
|
extra_join = ""
|
||||||
|
|
||||||
|
params = {
|
||||||
|
"startDate": data.startTimestamp,
|
||||||
|
"endDate": data.endTimestamp,
|
||||||
|
"project_id": project.project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"step_size": step_size}
|
||||||
|
if data.status != schemas.ErrorStatus.ALL:
|
||||||
|
pg_sub_query.append("status = %(error_status)s")
|
||||||
|
params["error_status"] = data.status
|
||||||
|
if data.limit is not None and data.page is not None:
|
||||||
|
params["errors_offset"] = (data.page - 1) * data.limit
|
||||||
|
params["errors_limit"] = data.limit
|
||||||
|
else:
|
||||||
|
params["errors_offset"] = 0
|
||||||
|
params["errors_limit"] = 200
|
||||||
|
|
||||||
|
if error_ids is not None:
|
||||||
|
params["error_ids"] = tuple(error_ids)
|
||||||
|
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||||
|
# if data.bookmarked:
|
||||||
|
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||||
|
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||||
|
if data.query is not None and len(data.query) > 0:
|
||||||
|
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||||
|
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||||
|
op=schemas.SearchEventOperator.CONTAINS)
|
||||||
|
|
||||||
|
main_pg_query = f"""SELECT full_count,
|
||||||
|
error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
users,
|
||||||
|
sessions,
|
||||||
|
last_occurrence,
|
||||||
|
first_occurrence,
|
||||||
|
chart
|
||||||
|
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||||
|
COUNT(DISTINCT session_id) AS sessions,
|
||||||
|
MAX(timestamp) AS max_datetime,
|
||||||
|
MIN(timestamp) AS min_datetime
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS pe USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
{extra_join}
|
||||||
|
WHERE {" AND ".join(pg_sub_query)}
|
||||||
|
GROUP BY error_id, name, message
|
||||||
|
ORDER BY {sort} {order}) AS details
|
||||||
|
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||||
|
) AS details
|
||||||
|
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||||
|
MIN(timestamp) AS first_occurrence
|
||||||
|
FROM events.errors
|
||||||
|
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors
|
||||||
|
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS sessions ON (TRUE)
|
||||||
|
GROUP BY timestamp
|
||||||
|
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||||
|
|
||||||
|
# print("--------------------")
|
||||||
|
# print(cur.mogrify(main_pg_query, params))
|
||||||
|
# print("--------------------")
|
||||||
|
|
||||||
|
cur.execute(cur.mogrify(main_pg_query, params))
|
||||||
|
rows = cur.fetchall()
|
||||||
|
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
||||||
|
|
||||||
|
if total == 0:
|
||||||
|
rows = []
|
||||||
|
else:
|
||||||
|
if len(statuses) == 0:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""SELECT error_id
|
||||||
|
FROM public.errors
|
||||||
|
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
||||||
|
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
||||||
|
"user_id": user_id})
|
||||||
|
cur.execute(query=query)
|
||||||
|
statuses = helper.list_to_camel_case(cur.fetchall())
|
||||||
|
statuses = {
|
||||||
|
s["errorId"]: s for s in statuses
|
||||||
|
}
|
||||||
|
|
||||||
|
for r in rows:
|
||||||
|
r.pop("full_count")
|
||||||
|
|
||||||
|
return {
|
||||||
|
'total': total,
|
||||||
|
'errors': helper.list_to_camel_case(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def __save_stacktrace(error_id, data):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""UPDATE public.errors
|
||||||
|
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
||||||
|
WHERE error_id = %(error_id)s;""",
|
||||||
|
{"error_id": error_id, "data": json.dumps(data)})
|
||||||
|
cur.execute(query=query)
|
||||||
|
|
||||||
|
|
||||||
|
def get_trace(project_id, error_id):
|
||||||
|
error = get(error_id=error_id, family=False)
|
||||||
|
if error is None:
|
||||||
|
return {"errors": ["error not found"]}
|
||||||
|
if error.get("source", "") != "js_exception":
|
||||||
|
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
||||||
|
if error.get("payload") is None:
|
||||||
|
return {"errors": ["null payload"]}
|
||||||
|
if error.get("stacktrace") is not None:
|
||||||
|
return {"sourcemapUploaded": True,
|
||||||
|
"trace": error.get("stacktrace"),
|
||||||
|
"preparsed": True}
|
||||||
|
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
||||||
|
if all_exists:
|
||||||
|
__save_stacktrace(error_id=error_id, data=trace)
|
||||||
|
return {"sourcemapUploaded": all_exists,
|
||||||
|
"trace": trace,
|
||||||
|
"preparsed": False}
|
||||||
|
|
||||||
|
|
||||||
|
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||||
|
extra_constraints = ["s.project_id = %(project_id)s",
|
||||||
|
"s.start_ts >= %(startDate)s",
|
||||||
|
"s.start_ts <= %(endDate)s",
|
||||||
|
"e.error_id = %(error_id)s"]
|
||||||
|
if start_date is None:
|
||||||
|
start_date = TimeUTC.now(-7)
|
||||||
|
if end_date is None:
|
||||||
|
end_date = TimeUTC.now()
|
||||||
|
|
||||||
|
params = {
|
||||||
|
"startDate": start_date,
|
||||||
|
"endDate": end_date,
|
||||||
|
"project_id": project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"error_id": error_id}
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT s.project_id,
|
||||||
|
s.session_id::text AS session_id,
|
||||||
|
s.user_uuid,
|
||||||
|
s.user_id,
|
||||||
|
s.user_agent,
|
||||||
|
s.user_os,
|
||||||
|
s.user_browser,
|
||||||
|
s.user_device,
|
||||||
|
s.user_country,
|
||||||
|
s.start_ts,
|
||||||
|
s.duration,
|
||||||
|
s.events_count,
|
||||||
|
s.pages_count,
|
||||||
|
s.errors_count,
|
||||||
|
s.issue_types,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_favorite_sessions AS fs
|
||||||
|
WHERE s.session_id = fs.session_id
|
||||||
|
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_viewed_sessions AS fs
|
||||||
|
WHERE s.session_id = fs.session_id
|
||||||
|
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||||
|
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
||||||
|
WHERE {" AND ".join(extra_constraints)}
|
||||||
|
ORDER BY s.start_ts DESC;""",
|
||||||
|
params)
|
||||||
|
cur.execute(query=query)
|
||||||
|
sessions_list = []
|
||||||
|
total = cur.rowcount
|
||||||
|
row = cur.fetchone()
|
||||||
|
while row is not None and len(sessions_list) < 100:
|
||||||
|
sessions_list.append(row)
|
||||||
|
row = cur.fetchone()
|
||||||
|
|
||||||
|
return {
|
||||||
|
'total': total,
|
||||||
|
'sessions': helper.list_to_camel_case(sessions_list)
|
||||||
|
}
|
||||||
api/chalicelib/core/errors/modules/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
    import chalicelib.core.sessions.sessions_ch as sessions
else:
    import chalicelib.core.sessions.sessions_pg as sessions
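
# Note (editor sketch, not part of the original diff): this package-level switch mirrors the one in
# chalicelib/core/errors/__init__.py -- the same EXP_ERRORS_SEARCH flag decides whether the shared
# `sessions` helper used by the errors modules is the ClickHouse or the PostgreSQL implementation.
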
api/chalicelib/core/errors/modules/helper.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from typing import Optional

import schemas
from chalicelib.core.sourcemaps import sourcemaps


def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
                            chart: bool = False, step_size_name: str = "step_size",
                            project_key: Optional[str] = "project_id"):
    if project_key is None:
        ch_sub_query = []
    else:
        ch_sub_query = [f"{project_key} =%(project_id)s"]
    if time_constraint:
        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
                         f"timestamp < %({endTime_arg_name})s"]
    if chart:
        ch_sub_query += [f"timestamp >= generated_timestamp",
                         f"timestamp < generated_timestamp + %({step_size_name})s"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
                               endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                               table_name=None):
    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
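
# Illustrative sketch (assumption, not part of the original file): the constraint builders only
# assemble WHERE fragments, e.g.
#     __get_basic_constraints(time_constraint=False, chart=True, project_key=None)
#     # -> ["timestamp >= generated_timestamp", "timestamp < generated_timestamp + %(step_size)s"]
#     __get_basic_constraints_ch(platform=schemas.PlatformType.MOBILE)
#     # -> ["project_id =toUInt16(%(project_id)s)", "`$event_name`='ERROR'",
#     #     "datetime >= toDateTime(%(startDate)s/1000)", "datetime < toDateTime(%(endDate)s/1000)",
#     #     "user_device_type = 'mobile'"]
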
@@ -1,48 +0,0 @@
from chalicelib.utils import pg_client


def add_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
                            VALUES (%(userId)s,%(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": True}


def remove_favorite_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""DELETE FROM public.user_favorite_errors
                            WHERE
                                user_id = %(userId)s
                                AND error_id = %(error_id)s;""",
                        {"userId": user_id, "error_id": error_id})
        )
    return {"errorId": error_id, "favorite": False}


def favorite_error(project_id, user_id, error_id):
    exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
    if not exists:
        return {"errors": ["cannot bookmark non-rehydrated errors"]}
    if favorite:
        return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
    return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)


def error_exists_and_favorite(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT errors.error_id AS exists, ufe.error_id AS favorite
                   FROM public.errors
                   LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
                   WHERE error_id = %(error_id)s;""",
                {"userId": user_id, "error_id": error_id})
        )
        r = cur.fetchone()
        if r is None:
            return False, False
        return True, r.get("favorite") is not None
|
@@ -1,37 +0,0 @@
from chalicelib.utils import pg_client


def add_viewed_error(project_id, user_id, error_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
                           VALUES (%(userId)s,%(error_id)s);""",
                        {"userId": user_id, "error_id": error_id})
        )


def viewed_error_exists(user_id, error_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """SELECT
                   errors.error_id AS hydrated,
                   COALESCE((SELECT TRUE
                             FROM public.user_viewed_errors AS ve
                             WHERE ve.error_id = %(error_id)s
                               AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
               FROM public.errors
               WHERE error_id = %(error_id)s""",
            {"userId": user_id, "error_id": error_id})
        cur.execute(
            query=query
        )
        r = cur.fetchone()
        if r:
            return r.get("viewed")
    return True


def viewed_error(project_id, user_id, error_id):
    if viewed_error_exists(user_id=user_id, error_id=error_id):
        return None
    return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
@@ -1,9 +1,10 @@
+from functools import cache
 from typing import Optional

 import schemas
-from chalicelib.core import autocomplete
 from chalicelib.core import issues
-from chalicelib.core import sessions_metas
+from chalicelib.core.autocomplete import autocomplete
+from chalicelib.core.sessions import sessions_metas
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
 from chalicelib.utils.event_filter_definition import SupportedFilter, Event

@@ -137,52 +138,57 @@ class EventType:
                                      column=None)  # column=None because errors are searched by name or message


-SUPPORTED_TYPES = {
+@cache
+def supported_types():
+    return {
         EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
                                                  query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
         EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
                                                  query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
         EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
                                                     query=autocomplete.__generic_query(
                                                         typename=EventType.LOCATION.ui_type)),
         EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
                                                   query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
         EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.REQUEST.ui_type)),
         EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
                                                    query=autocomplete.__generic_query(
                                                        typename=EventType.GRAPHQL.ui_type)),
         EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
                                                        query=autocomplete.__generic_query(
                                                            typename=EventType.STATEACTION.ui_type)),
         EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
         EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
                                                  query=None),
         EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
                                                     query=None),
         # MOBILE
         EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
                                                         query=autocomplete.__generic_query(
                                                             typename=EventType.CLICK_MOBILE.ui_type)),
         EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
                                                         query=autocomplete.__generic_query(
                                                             typename=EventType.SWIPE_MOBILE.ui_type)),
         EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
                                                         query=autocomplete.__generic_query(
                                                             typename=EventType.INPUT_MOBILE.ui_type)),
         EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
                                                        query=autocomplete.__generic_query(
                                                            typename=EventType.VIEW_MOBILE.ui_type)),
         EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
             get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
             query=autocomplete.__generic_query(
                 typename=EventType.CUSTOM_MOBILE.ui_type)),
         EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
             get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
             query=autocomplete.__generic_query(
                 typename=EventType.REQUEST_MOBILE.ui_type)),
         EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
                                                         query=None),
-}
+    }


 def get_errors_by_session_id(session_id, project_id):
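The module-level SUPPORTED_TYPES dictionary becomes a cached function: functools.cache builds the registry on the first call and every later call returns the same object. A stand-alone sketch of that behaviour (the function body and values below are illustrative only):

# Illustrative sketch of the @cache pattern used above (not part of the diff):
from functools import cache

@cache
def supported_types_demo():
    print("building registry")       # runs only on the first call
    return {"CLICK": object(), "INPUT": object()}

first = supported_types_demo()       # prints "building registry"
second = supported_types_demo()      # served from the cache, nothing printed
assert first is second               # the very same dict object is returned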
@@ -202,20 +208,17 @@ def search(text, event_type, project_id, source, key):
     if not event_type:
         return {"data": autocomplete.__get_autocomplete_table(text, project_id)}

-    if event_type in SUPPORTED_TYPES.keys():
-        rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
-    # for IOS events autocomplete
-    # if event_type + "_IOS" in SUPPORTED_TYPES.keys():
-    #     rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,source=source)
-    elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
-        rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
-    elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
+    if event_type in supported_types().keys():
+        rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
+    elif event_type + "_MOBILE" in supported_types().keys():
+        rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
+    elif event_type in sessions_metas.supported_types().keys():
         return sessions_metas.search(text, event_type, project_id)
     elif event_type.endswith("_IOS") \
-            and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
+            and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
         return sessions_metas.search(text, event_type, project_id)
     elif event_type.endswith("_MOBILE") \
-            and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
+            and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
         return sessions_metas.search(text, event_type, project_id)
     else:
         return {"errors": ["unsupported event"]}
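The dispatch in search() first looks the event type up directly, then retries with a "_MOBILE" suffix before falling back to sessions_metas. A toy sketch of that lookup order (keys and values here are made up; the real keys come from EventType.*.ui_type):

# Toy illustration of the lookup-order used by search() above:
def resolve_filter(event_type, registry):
    if event_type in registry:
        return registry[event_type]
    if event_type + "_MOBILE" in registry:       # web key falls back to its mobile variant
        return registry[event_type + "_MOBILE"]
    return None

registry = {"CLICK": "web-click-filter", "SWIPE_MOBILE": "mobile-swipe-filter"}
assert resolve_filter("CLICK", registry) == "web-click-filter"
assert resolve_filter("SWIPE", registry) == "mobile-swipe-filter"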
@@ -27,7 +27,6 @@ HEALTH_ENDPOINTS = {
     "http": app_connection_string("http-openreplay", 8888, "metrics"),
     "ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
     "integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
-    "peers": app_connection_string("peers-openreplay", 8888, "health"),
     "sink": app_connection_string("sink-openreplay", 8888, "metrics"),
     "sourcemapreader": app_connection_string(
         "sourcemapreader-openreplay", 8888, "health"

@@ -39,9 +38,7 @@ HEALTH_ENDPOINTS = {
 def __check_database_pg(*_):
     fail_response = {
         "health": False,
-        "details": {
-            "errors": ["Postgres health-check failed"]
-        }
+        "details": {"errors": ["Postgres health-check failed"]},
     }
     with pg_client.PostgresClient() as cur:
         try:

@@ -63,29 +60,26 @@ def __check_database_pg(*_):
             "details": {
                 # "version": server_version["server_version"],
                 # "schema": schema_version["version"]
-            }
+            },
         }


 def __always_healthy(*_):
-    return {
-        "health": True,
-        "details": {}
-    }
+    return {"health": True, "details": {}}


 def __check_be_service(service_name):
     def fn(*_):
         fail_response = {
             "health": False,
-            "details": {
-                "errors": ["server health-check failed"]
-            }
+            "details": {"errors": ["server health-check failed"]},
         }
         try:
             results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
             if results.status_code != 200:
-                logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
+                logger.error(
+                    f"!! issue with the {service_name}-health code:{results.status_code}"
+                )
                 logger.error(results.text)
                 # fail_response["details"]["errors"].append(results.text)
                 return fail_response

@@ -103,10 +97,7 @@ def __check_be_service(service_name):
             logger.error("couldn't get response")
             # fail_response["details"]["errors"].append(str(e))
             return fail_response
-        return {
-            "health": True,
-            "details": {}
-        }
+        return {"health": True, "details": {}}

     return fn

@@ -114,7 +105,7 @@ def __check_be_service(service_name):
 def __check_redis(*_):
     fail_response = {
         "health": False,
-        "details": {"errors": ["server health-check failed"]}
+        "details": {"errors": ["server health-check failed"]},
     }
     if config("REDIS_STRING", default=None) is None:
         # fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
@@ -133,16 +124,14 @@ def __check_redis(*_):
         "health": True,
         "details": {
             # "version": r.execute_command('INFO')['redis_version']
-        }
+        },
     }


 def __check_SSL(*_):
     fail_response = {
         "health": False,
-        "details": {
-            "errors": ["SSL Certificate health-check failed"]
-        }
+        "details": {"errors": ["SSL Certificate health-check failed"]},
     }
     try:
         requests.get(config("SITE_URL"), verify=True, allow_redirects=True)

@@ -150,36 +139,28 @@ def __check_SSL(*_):
         logger.error("!! health failed: SSL Certificate")
         logger.exception(e)
         return fail_response
-    return {
-        "health": True,
-        "details": {}
-    }
+    return {"health": True, "details": {}}


 def __get_sessions_stats(*_):
     with pg_client.PostgresClient() as cur:
         constraints = ["projects.deleted_at IS NULL"]
-        query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
+        query = cur.mogrify(
+            f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
                                        COALESCE(SUM(events_count),0) AS e_c
                                 FROM public.projects_stats
                                      INNER JOIN public.projects USING(project_id)
-                                WHERE {" AND ".join(constraints)};""")
+                                WHERE {" AND ".join(constraints)};"""
+        )
         cur.execute(query)
         row = cur.fetchone()
-    return {
-        "numberOfSessionsCaptured": row["s_c"],
-        "numberOfEventCaptured": row["e_c"]
-    }
+    return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}


 def get_health(tenant_id=None):
     health_map = {
-        "databases": {
-            "postgres": __check_database_pg
-        },
-        "ingestionPipeline": {
-            "redis": __check_redis
-        },
+        "databases": {"postgres": __check_database_pg},
+        "ingestionPipeline": {"redis": __check_redis},
         "backendServices": {
             "alerts": __check_be_service("alerts"),
             "assets": __check_be_service("assets"),

@@ -192,13 +173,12 @@ def get_health(tenant_id=None):
             "http": __check_be_service("http"),
             "ingress-nginx": __always_healthy,
             "integrations": __check_be_service("integrations"),
-            "peers": __check_be_service("peers"),
             "sink": __check_be_service("sink"),
             "sourcemapreader": __check_be_service("sourcemapreader"),
-            "storage": __check_be_service("storage")
+            "storage": __check_be_service("storage"),
         },
         "details": __get_sessions_stats,
-        "ssl": __check_SSL
+        "ssl": __check_SSL,
     }
     return __process_health(health_map=health_map)

@@ -210,10 +190,16 @@ def __process_health(health_map):
             response.pop(parent_key)
         elif isinstance(health_map[parent_key], dict):
             for element_key in health_map[parent_key]:
-                if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
+                if config(
+                    f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
+                    cast=bool,
+                    default=False,
+                ):
                     response[parent_key].pop(element_key)
                 else:
-                    response[parent_key][element_key] = health_map[parent_key][element_key]()
+                    response[parent_key][element_key] = health_map[parent_key][
+                        element_key
+                    ]()
         else:
             response[parent_key] = health_map[parent_key]()
     return response
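Each entry of health_map can be disabled through an environment variable whose name is derived from its group and key, as the config() call above shows. A small illustration of that derivation (the group/key pair is taken from the health_map above, the env value is an example):

# Illustrative only: skipping the Redis check via the env var derived in __process_health.
# Setting SKIP_H_INGESTIONPIPELINE_REDIS=true makes config(..., cast=bool, default=False) return True.
parent_key, element_key = "ingestionPipeline", "redis"
env_name = f"SKIP_H_{parent_key.upper()}_{element_key.upper()}"
print(env_name)  # SKIP_H_INGESTIONPIPELINE_REDIS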
@@ -221,7 +207,8 @@ def __process_health(health_map):

 def cron():
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify("""SELECT projects.project_id,
+        query = cur.mogrify(
+            """SELECT projects.project_id,
                                       projects.created_at,
                                       projects.sessions_last_check_at,
                                       projects.first_recorded_session_at,

@@ -229,7 +216,8 @@ def cron():
                                FROM public.projects
                                     LEFT JOIN public.projects_stats USING (project_id)
                                WHERE projects.deleted_at IS NULL
-                               ORDER BY project_id;""")
+                               ORDER BY project_id;"""
+        )
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:

@@ -250,20 +238,24 @@ def cron():
                 count_start_from = r["last_update_at"]

             count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
-            params = {"project_id": r["project_id"],
-                      "start_ts": count_start_from,
-                      "end_ts": TimeUTC.now(),
-                      "sessions_count": 0,
-                      "events_count": 0}
+            params = {
+                "project_id": r["project_id"],
+                "start_ts": count_start_from,
+                "end_ts": TimeUTC.now(),
+                "sessions_count": 0,
+                "events_count": 0,
+            }

-            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify(
+                """SELECT COUNT(1) AS sessions_count,
                                           COALESCE(SUM(events_count),0) AS events_count
                                    FROM public.sessions
                                    WHERE project_id=%(project_id)s
                                      AND start_ts>=%(start_ts)s
                                      AND start_ts<=%(end_ts)s
                                      AND duration IS NOT NULL;""",
-                                params)
+                params,
+            )
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:

@@ -271,56 +263,68 @@ def cron():
                 params["events_count"] = row["events_count"]

             if insert:
-                query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
+                query = cur.mogrify(
+                    """INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                                        VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
-                                    params)
+                    params,
+                )
             else:
-                query = cur.mogrify("""UPDATE public.projects_stats
+                query = cur.mogrify(
+                    """UPDATE public.projects_stats
                                        SET sessions_count=sessions_count+%(sessions_count)s,
                                            events_count=events_count+%(events_count)s,
                                            last_update_at=(now() AT TIME ZONE 'utc'::text)
                                        WHERE project_id=%(project_id)s;""",
-                                    params)
+                    params,
+                )
             cur.execute(query)


 # this cron is used to correct the sessions&events count every week
 def weekly_cron():
     with pg_client.PostgresClient(long_query=True) as cur:
-        query = cur.mogrify("""SELECT project_id,
+        query = cur.mogrify(
+            """SELECT project_id,
                                       projects_stats.last_update_at
                                FROM public.projects
                                     LEFT JOIN public.projects_stats USING (project_id)
                                WHERE projects.deleted_at IS NULL
-                               ORDER BY project_id;""")
+                               ORDER BY project_id;"""
+        )
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:
            if r["last_update_at"] is None:
                continue

-            params = {"project_id": r["project_id"],
-                      "end_ts": TimeUTC.now(),
-                      "sessions_count": 0,
-                      "events_count": 0}
+            params = {
+                "project_id": r["project_id"],
+                "end_ts": TimeUTC.now(),
+                "sessions_count": 0,
+                "events_count": 0,
+            }

-            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify(
+                """SELECT COUNT(1) AS sessions_count,
                                           COALESCE(SUM(events_count),0) AS events_count
                                    FROM public.sessions
                                    WHERE project_id=%(project_id)s
                                      AND start_ts<=%(end_ts)s
                                      AND duration IS NOT NULL;""",
-                                params)
+                params,
+            )
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:
                 params["sessions_count"] = row["sessions_count"]
                 params["events_count"] = row["events_count"]

-            query = cur.mogrify("""UPDATE public.projects_stats
+            query = cur.mogrify(
+                """UPDATE public.projects_stats
                                    SET sessions_count=%(sessions_count)s,
                                        events_count=%(events_count)s,
                                        last_update_at=(now() AT TIME ZONE 'utc'::text)
                                    WHERE project_id=%(project_id)s;""",
-                                params)
+                params,
+            )
             cur.execute(query)
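The two crons above split the bookkeeping: cron() adds only the sessions recorded since last_update_at to projects_stats, while weekly_cron() overwrites the totals with a full recount. A compact, pure-Python model of that split (all values are made up):

# Toy model of the incremental vs. full update done by cron() and weekly_cron():
stats = {"sessions_count": 100, "last_update_at": 1_700_000_000_000}

def incremental_update(stats, new_sessions_since_last_check, now_ms):
    stats["sessions_count"] += new_sessions_since_last_check   # cron(): add the delta only
    stats["last_update_at"] = now_ms
    return stats

def full_recount(stats, total_sessions, now_ms):
    stats["sessions_count"] = total_sessions                    # weekly_cron(): replace with a recount
    stats["last_update_at"] = now_ms
    return stats

incremental_update(stats, 7, 1_700_000_600_000)    # {"sessions_count": 107, ...}
full_recount(stats, 107, 1_700_601_000_000)        # corrects any drift once a week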
@@ -1,12 +1,12 @@
 import schemas
-from chalicelib.core import integration_base
-from chalicelib.core.integration_github_issue import GithubIntegrationIssue
+from chalicelib.core.issue_tracking import base
+from chalicelib.core.issue_tracking.github_issue import GithubIntegrationIssue
 from chalicelib.utils import pg_client, helper

 PROVIDER = schemas.IntegrationType.GITHUB


-class GitHubIntegration(integration_base.BaseIntegration):
+class GitHubIntegration(base.BaseIntegration):

     def __init__(self, tenant_id, user_id):
         self.__tenant_id = tenant_id
@@ -1,12 +1,12 @@
-from chalicelib.core.integration_base_issue import BaseIntegrationIssue
+from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
 from chalicelib.utils import github_client_v3
 from chalicelib.utils.github_client_v3 import github_formatters as formatter


 class GithubIntegrationIssue(BaseIntegrationIssue):
-    def __init__(self, integration_token):
-        self.__client = github_client_v3.githubV3Request(integration_token)
-        super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)
+    def __init__(self, token):
+        self.__client = github_client_v3.githubV3Request(token)
+        super(GithubIntegrationIssue, self).__init__("GITHUB", token)

     def get_current_user(self):
         return formatter.user(self.__client.get("/user"))

@@ -28,9 +28,9 @@ class GithubIntegrationIssue(BaseIntegrationIssue):

         return meta

-    def create_new_assignment(self, integration_project_id, title, description, assignee,
+    def create_new_assignment(self, project_id, title, description, assignee,
                               issue_type):
-        repoId = integration_project_id
+        repoId = project_id
         assignees = [assignee]
         labels = [str(issue_type)]

@@ -59,11 +59,11 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
     def get_by_ids(self, saved_issues):
         results = []
         for i in saved_issues:
-            results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
+            results.append(self.get(project_id=i["integrationProjectId"], assignment_id=i["id"]))
         return {"issues": results}

-    def get(self, integration_project_id, assignment_id):
-        repoId = integration_project_id
+    def get(self, project_id, assignment_id):
+        repoId = project_id
         issueNumber = assignment_id
         issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
         issue = formatter.issue(issue)

@@ -72,17 +72,17 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
                              self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
         return issue

-    def comment(self, integration_project_id, assignment_id, comment):
-        repoId = integration_project_id
+    def comment(self, project_id, assignment_id, comment):
+        repoId = project_id
         issueNumber = assignment_id
         commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                             body={"body": comment})
         return formatter.comment(commentCreated)

-    def get_metas(self, integration_project_id):
+    def get_metas(self, project_id):
         current_user = self.get_current_user()
         try:
-            users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
+            users = self.__client.get(f"/repositories/{project_id}/collaborators")
         except Exception as e:
             users = []
         users = [formatter.user(u) for u in users]

@@ -92,7 +92,7 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
         return {"provider": self.provider.lower(),
                 'users': users,
                 'issueTypes': [formatter.label(l) for l in
-                               self.__client.get(f"/repositories/{integration_project_id}/labels")]
+                               self.__client.get(f"/repositories/{project_id}/labels")]
                 }

     def get_projects(self):
@@ -1,4 +1,5 @@
 import schemas
+from chalicelib.core.modules import TENANT_CONDITION
 from chalicelib.utils import pg_client


@@ -51,10 +52,10 @@ def get_global_integrations_status(tenant_id, user_id, project_id):
                                    AND provider='elasticsearch')) AS {schemas.IntegrationType.ELASTICSEARCH.value},
                            EXISTS((SELECT 1
                                    FROM public.webhooks
-                                   WHERE type='slack' AND deleted_at ISNULL)) AS {schemas.IntegrationType.SLACK.value},
+                                   WHERE type='slack' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.SLACK.value},
                            EXISTS((SELECT 1
                                    FROM public.webhooks
-                                   WHERE type='msteams' AND deleted_at ISNULL)) AS {schemas.IntegrationType.MS_TEAMS.value},
+                                   WHERE type='msteams' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.MS_TEAMS.value},
                            EXISTS((SELECT 1
                                    FROM public.integrations
                                    WHERE project_id=%(project_id)s AND provider='dynatrace')) AS {schemas.IntegrationType.DYNATRACE.value};""",
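TENANT_CONDITION is imported from chalicelib.core.modules and spliced into the SQL as an f-string fragment; its actual definition is not shown in this diff. A hypothetical illustration of the splicing, assuming it expands to a tenant filter such as `tenant_id = %(tenant_id)s` (an assumption, not confirmed by this diff):

# Hypothetical: the real constant lives in chalicelib/core/modules.py and may differ.
TENANT_CONDITION = "tenant_id = %(tenant_id)s"   # assumed placeholder for the sketch

query = f"""SELECT 1
            FROM public.webhooks
            WHERE type = 'slack' AND deleted_at ISNULL AND {TENANT_CONDITION};"""
params = {"tenant_id": 1}
# cur.execute(cur.mogrify(query, params))  # executed with the pg_client cursor as elsewhere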
@@ -1,7 +1,7 @@
-from chalicelib.core import integration_github, integration_jira_cloud
+from chalicelib.core.issue_tracking import github, jira_cloud
 from chalicelib.utils import pg_client

-SUPPORTED_TOOLS = [integration_github.PROVIDER, integration_jira_cloud.PROVIDER]
+SUPPORTED_TOOLS = [github.PROVIDER, jira_cloud.PROVIDER]


 def get_available_integrations(user_id):

@@ -23,7 +23,7 @@ def get_available_integrations(user_id):

 def __get_default_integration(user_id):
     current_integrations = get_available_integrations(user_id)
-    return integration_github.PROVIDER if current_integrations["github"] else integration_jira_cloud.PROVIDER if \
+    return github.PROVIDER if current_integrations["github"] else jira_cloud.PROVIDER if \
         current_integrations["jira"] else None


@@ -35,11 +35,11 @@ def get_integration(tenant_id, user_id, tool=None, for_delete=False):
     tool = tool.upper()
     if tool not in SUPPORTED_TOOLS:
         return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
-    if tool == integration_jira_cloud.PROVIDER:
-        integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
+    if tool == jira_cloud.PROVIDER:
+        integration = jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
         if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
             return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
         return None, integration
-    elif tool == integration_github.PROVIDER:
-        return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
+    elif tool == github.PROVIDER:
+        return None, github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
     return {"errors": ["lost integration"]}, None
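get_integration() returns an (errors, integration) pair, and callers are expected to check the error side first. A minimal usage sketch (the tenant and user ids are made up; the constructors hit the database as shown above):

# Illustrative use of the (errors, integration) pair returned by get_integration():
errors, integration = get_integration(tenant_id=1, user_id=7, tool="github")
if errors is not None:
    print(errors)                        # unsupported tool, broken JIRA credentials, or "lost integration"
else:
    print(type(integration).__name__)    # GitHubIntegration or JIRAIntegration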
@@ -1,6 +1,6 @@
 import schemas
-from chalicelib.core import integration_base
-from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
+from chalicelib.core.issue_tracking import base
+from chalicelib.core.issue_tracking.jira_cloud_issue import JIRACloudIntegrationIssue
 from chalicelib.utils import pg_client, helper

 PROVIDER = schemas.IntegrationType.JIRA

@@ -10,7 +10,7 @@ def obfuscate_string(string):
     return "*" * (len(string) - 4) + string[-4:]


-class JIRAIntegration(integration_base.BaseIntegration):
+class JIRAIntegration(base.BaseIntegration):
     def __init__(self, tenant_id, user_id):
         self.__tenant_id = tenant_id
         # TODO: enable super-constructor when OAuth is done

@@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
             cur.execute(
                 cur.mogrify(
                     """SELECT username, token, url
                        FROM public.jira_cloud
-                       WHERE user_id=%(user_id)s;""",
+                       WHERE user_id = %(user_id)s;""",
                     {"user_id": self._user_id})
             )
             data = helper.dict_to_camel_case(cur.fetchone())

@@ -95,10 +95,9 @@ class JIRAIntegration(base.BaseIntegration):
     def add(self, username, token, url, obfuscate=False):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify("""\
-                        INSERT INTO public.jira_cloud(username, token, user_id,url)
-                        VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
-                        RETURNING username, token, url;""",
+                cur.mogrify(""" \
+                        INSERT INTO public.jira_cloud(username, token, user_id, url)
+                        VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
                             {"user_id": self._user_id, "username": username,
                              "token": token, "url": url})
             )

@@ -112,9 +111,10 @@ class JIRAIntegration(base.BaseIntegration):
     def delete(self):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify("""\
-                        DELETE FROM public.jira_cloud
-                        WHERE user_id=%(user_id)s;""",
+                cur.mogrify(""" \
+                        DELETE
+                        FROM public.jira_cloud
+                        WHERE user_id = %(user_id)s;""",
                             {"user_id": self._user_id})
             )
         return {"state": "success"}

@@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
             changes={
                 "username": data.username,
                 "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
-                    else self.integration.token,
+                    else self.integration["token"],
                 "url": str(data.url)
             },
             obfuscate=True
@@ -1,5 +1,5 @@
 from chalicelib.utils import jira_client
-from chalicelib.core.integration_base_issue import BaseIntegrationIssue
+from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue


 class JIRACloudIntegrationIssue(BaseIntegrationIssue):

@@ -9,8 +9,8 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
         self._client = jira_client.JiraManager(self.url, self.username, token, None)
         super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

-    def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
-        self._client.set_jira_project_id(integration_project_id)
+    def create_new_assignment(self, project_id, title, description, assignee, issue_type):
+        self._client.set_jira_project_id(project_id)
         data = {
             'summary': title,
             'description': description,

@@ -28,26 +28,26 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
             projects_map[i["integrationProjectId"]].append(i["id"])

         results = []
-        for integration_project_id in projects_map:
-            self._client.set_jira_project_id(integration_project_id)
+        for project_id in projects_map:
+            self._client.set_jira_project_id(project_id)
             jql = 'labels = OpenReplay'
-            if len(projects_map[integration_project_id]) > 0:
-                jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
+            if len(projects_map[project_id]) > 0:
+                jql += f" AND ID IN ({','.join(projects_map[project_id])})"
             issues = self._client.get_issues(jql, offset=0)
             results += issues
         return {"issues": results}

-    def get(self, integration_project_id, assignment_id):
-        self._client.set_jira_project_id(integration_project_id)
+    def get(self, project_id, assignment_id):
+        self._client.set_jira_project_id(project_id)
         return self._client.get_issue_v3(assignment_id)

-    def comment(self, integration_project_id, assignment_id, comment):
-        self._client.set_jira_project_id(integration_project_id)
+    def comment(self, project_id, assignment_id, comment):
+        self._client.set_jira_project_id(project_id)
         return self._client.add_comment_v3(assignment_id, comment)

-    def get_metas(self, integration_project_id):
+    def get_metas(self, project_id):
         meta = {}
-        self._client.set_jira_project_id(integration_project_id)
+        self._client.set_jira_project_id(project_id)
         meta['issueTypes'] = self._client.get_issue_types()
         meta['users'] = self._client.get_assignable_users()
         return {"provider": self.provider.lower(), **meta}
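get_by_ids() builds a JQL filter out of the saved issue ids before querying Jira. A small stand-alone sketch of that string construction (the ids are made up):

# Stand-alone illustration of the JQL built in get_by_ids() above:
saved_ids = ["10012", "10040"]
jql = 'labels = OpenReplay'
if len(saved_ids) > 0:
    jql += f" AND ID IN ({','.join(saved_ids)})"
print(jql)   # labels = OpenReplay AND ID IN (10012,10040)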
@@ -1,6 +1,10 @@
+import logging
+
+from chalicelib.core.sessions import sessions_mobs, sessions_devtool
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.core import sessions_mobs, sessions_devtool
+
+logger = logging.getLogger(__name__)


 class Actions:

@@ -150,23 +154,23 @@ def get_scheduled_jobs():
 def execute_jobs():
     jobs = get_scheduled_jobs()
     for job in jobs:
-        print(f"Executing jobId:{job['jobId']}")
+        logger.info(f"Executing jobId:{job['jobId']}")
         try:
             if job["action"] == Actions.DELETE_USER_DATA:
                 session_ids = __get_session_ids_by_user_ids(project_id=job["projectId"],
                                                             user_ids=[job["referenceId"]])
                 if len(session_ids) > 0:
-                    print(f"Deleting {len(session_ids)} sessions")
+                    logger.info(f"Deleting {len(session_ids)} sessions")
                     __delete_sessions_by_session_ids(session_ids=session_ids)
                     __delete_session_mobs_by_session_ids(session_ids=session_ids, project_id=job["projectId"])
             else:
                 raise Exception(f"The action '{job['action']}' not supported.")

             job["status"] = JobStatus.COMPLETED
-            print(f"Job completed {job['jobId']}")
+            logger.info(f"Job completed {job['jobId']}")
         except Exception as e:
             job["status"] = JobStatus.FAILED
             job["errors"] = str(e)
-            print(f"Job failed {job['jobId']}")
+            logger.error(f"Job failed {job['jobId']}")

         update(job["jobId"], job)
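The job runner now reports through the module-level logger instead of print(), so whether these lines appear depends on the logging configuration of the host process. A minimal, self-contained configuration sketch (the level, format, and job id are illustrative):

# Minimal logging setup sketch; the real service configures logging elsewhere.
import logging

logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
logger = logging.getLogger(__name__)
logger.info("Executing jobId:%s", 42)   # sample record, the job id is made up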
@@ -1,6 +1,5 @@
-from chalicelib.core import log_tools
 import requests
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "bugsnag"

@@ -1,5 +1,5 @@
 import boto3
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "cloudwatch"

@@ -1,4 +1,4 @@
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "datadog"

@@ -1,8 +1,7 @@
 import logging

+from chalicelib.core.log_tools import log_tools
 from elasticsearch import Elasticsearch
-
-from chalicelib.core import log_tools
 from schemas import schemas

 logger = logging.getLogger(__name__)

@@ -1,6 +1,8 @@
-from chalicelib.utils import pg_client, helper
 import json

+from chalicelib.core.modules import TENANT_CONDITION
+from chalicelib.utils import pg_client, helper

 EXCEPT = ["jira_server", "jira_cloud"]


@@ -94,11 +96,11 @@ def get_all_by_tenant(tenant_id, integration):
     with pg_client.PostgresClient() as cur:
         cur.execute(
             cur.mogrify(
-                """SELECT integrations.*
+                f"""SELECT integrations.*
                    FROM public.integrations INNER JOIN public.projects USING(project_id)
-                   WHERE provider = %(provider)s
+                   WHERE provider = %(provider)s AND {TENANT_CONDITION}
                      AND projects.deleted_at ISNULL;""",
-                {"provider": integration})
+                {"tenant_id": tenant_id, "provider": integration})
         )
         r = cur.fetchall()
     return helper.list_to_camel_case(r, flatten=True)

@@ -1,4 +1,4 @@
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "newrelic"

@@ -1,4 +1,4 @@
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "rollbar"

@@ -1,5 +1,5 @@
 import requests
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "sentry"

@@ -1,4 +1,4 @@
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "stackdriver"

@@ -1,4 +1,4 @@
-from chalicelib.core import log_tools
+from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "sumologic"
@@ -98,17 +98,23 @@ def __edit(project_id, col_index, colname, new_name):
     if col_index not in list(old_metas.keys()):
         return {"errors": ["custom field not found"]}

-    with pg_client.PostgresClient() as cur:
-        if old_metas[col_index]["key"] != new_name:
+    if old_metas[col_index]["key"] != new_name:
+        with pg_client.PostgresClient() as cur:
             query = cur.mogrify(f"""UPDATE public.projects
                                     SET {colname} = %(value)s
                                     WHERE project_id = %(project_id)s
                                       AND deleted_at ISNULL
-                                    RETURNING {colname};""",
+                                    RETURNING {colname},
+                                        (SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
                                 {"project_id": project_id, "value": new_name})
             cur.execute(query=query)
-            new_name = cur.fetchone()[colname]
+            row = cur.fetchone()
+            new_name = row[colname]
+            old_name = row['old_' + colname]
             old_metas[col_index]["key"] = new_name
+            projects.rename_metadata_condition(project_id=project_id,
+                                               old_metadata_key=old_name,
+                                               new_metadata_key=new_name)
     return {"data": old_metas[col_index]}

@@ -121,8 +127,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
 def delete(tenant_id, project_id, index: int):
     index = int(index)
     old_segments = get(project_id)
-    old_segments = [k["index"] for k in old_segments]
-    if index not in old_segments:
+    old_indexes = [k["index"] for k in old_segments]
+    if index not in old_indexes:
         return {"errors": ["custom field not found"]}

     with pg_client.PostgresClient() as cur:

@@ -132,7 +138,8 @@ def delete(tenant_id, project_id, index: int):
                                 WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
                             {"project_id": project_id})
        cur.execute(query=query)
+        projects.delete_metadata_condition(project_id=project_id,
+                                           metadata_key=old_segments[old_indexes.index(index)]["key"])
     return {"data": get(project_id)}
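The rewritten UPDATE returns both the new column value and, through a scalar subselect that still sees the statement's pre-update snapshot, the old one, so the rename can be propagated in a single round trip. A stripped-down illustration of the same SQL pattern (the column name and values are illustrative, and a psycopg cursor named cur is assumed, as elsewhere in this module):

# Stripped-down sketch of the RETURNING-old-value pattern used in __edit():
query = cur.mogrify("""UPDATE public.projects
                       SET metadata_1 = %(value)s
                       WHERE project_id = %(project_id)s
                       RETURNING metadata_1,
                                 (SELECT metadata_1 FROM projects
                                  WHERE project_id = %(project_id)s) AS old_metadata_1;""",
                    {"project_id": 1, "value": "plan"})
cur.execute(query)
row = cur.fetchone()   # row["metadata_1"] is the new value, row["old_metadata_1"] the previous one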
@ -1,624 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
import schemas
|
|
||||||
from chalicelib.core import metadata
|
|
||||||
from chalicelib.utils import helper
|
|
||||||
from chalicelib.utils import pg_client
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
|
||||||
from chalicelib.utils.metrics_helper import __get_step_size
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_constraints(project_id, time_constraint=True, chart=False, duration=True, project=True,
|
|
||||||
project_identifier="project_id",
|
|
||||||
main_table="sessions", time_column="start_ts", data={}):
|
|
||||||
pg_sub_query = []
|
|
||||||
main_table = main_table + "." if main_table is not None and len(main_table) > 0 else ""
|
|
||||||
if project:
|
|
||||||
pg_sub_query.append(f"{main_table}{project_identifier} =%({project_identifier})s")
|
|
||||||
if duration:
|
|
||||||
pg_sub_query.append(f"{main_table}duration>0")
|
|
||||||
if time_constraint:
|
|
||||||
pg_sub_query.append(f"{main_table}{time_column} >= %(startTimestamp)s")
|
|
||||||
pg_sub_query.append(f"{main_table}{time_column} < %(endTimestamp)s")
|
|
||||||
if chart:
|
|
||||||
pg_sub_query.append(f"{main_table}{time_column} >= generated_timestamp")
|
|
||||||
pg_sub_query.append(f"{main_table}{time_column} < generated_timestamp + %(step_size)s")
|
|
||||||
return pg_sub_query + __get_meta_constraint(project_id=project_id, data=data)
|
|
||||||
|
|
||||||
|
|
||||||
def __merge_charts(list1, list2, time_key="timestamp"):
|
|
||||||
if len(list1) != len(list2):
|
|
||||||
raise Exception("cannot merge unequal lists")
|
|
||||||
result = []
|
|
||||||
for i in range(len(list1)):
|
|
||||||
timestamp = min(list1[i][time_key], list2[i][time_key])
|
|
||||||
result.append({**list1[i], **list2[i], time_key: timestamp})
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def __get_constraint_values(data):
|
|
||||||
params = {}
|
|
||||||
for i, f in enumerate(data.get("filters", [])):
|
|
||||||
params[f"{f['key']}_{i}"] = f["value"]
|
|
||||||
return params
|
|
||||||
|
|
||||||
|
|
||||||
def __get_meta_constraint(project_id, data):
    if len(data.get("filters", [])) == 0:
        return []
    constraints = []
    meta_keys = metadata.get(project_id=project_id)
    meta_keys = {m["key"]: m["index"] for m in meta_keys}

    for i, f in enumerate(data.get("filters", [])):
        if f["key"] in meta_keys.keys():
            key = f"sessions.metadata_{meta_keys[f['key']]}"
            if f["value"] in ["*", ""]:
                constraints.append(f"{key} IS NOT NULL")
            else:
                constraints.append(f"{key} = %({f['key']}_{i})s")
        else:
            filter_type = f["key"].upper()
            filter_type = [filter_type, "USER" + filter_type, filter_type[4:]]
            if any(item in [schemas.FilterType.USER_BROWSER]
                   for item in filter_type):
                constraints.append(f"sessions.user_browser = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.user_os = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.user_device = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.user_country = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.user_id = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.USER_ANONYMOUS_ID, schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s")
            elif any(item in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]
                     for item in filter_type):
                constraints.append(f"sessions.rev_id = %({f['key']}_{i})s")
    return constraints

def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(),
                           density=7, **args):
    step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
    pg_sub_query = __get_constraints(project_id=project_id, data=args)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
                                           chart=True, data=args)
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT generated_timestamp AS timestamp,
                              COALESCE(COUNT(sessions), 0) AS value
                       FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                            LEFT JOIN LATERAL ( SELECT 1
                                                FROM public.sessions
                                                WHERE {" AND ".join(pg_sub_query_chart)}
                                ) AS sessions ON (TRUE)
                       GROUP BY generated_timestamp
                       ORDER BY generated_timestamp;"""
        params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        results = {
            "value": sum([r["value"] for r in rows]),
            "chart": rows
        }

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff

        pg_query = f"""SELECT COUNT(sessions.session_id) AS count
                       FROM public.sessions
                       WHERE {" AND ".join(pg_sub_query)};"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
                  **__get_constraint_values(args)}

        cur.execute(cur.mogrify(pg_query, params))

        count = cur.fetchone()["count"]

        results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results

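The counter metrics above derive their "progress" figure by re-running the same aggregate over the window of equal length that immediately precedes the requested one; the shift is plain arithmetic (numbers below are illustrative, not from the source):

# Illustrative window shift for the previous-period comparison.
startTimestamp, endTimestamp = 1_000_000, 1_600_000   # requested window, 600 000 ms wide
diff = endTimestamp - startTimestamp
endTimestamp = startTimestamp                          # previous window ends where the current one starts
startTimestamp = endTimestamp - diff                   # previous window: 400 000 .. 1 000 000
# helper.__progress(old_val, new_val) then compares the two counts.
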
def __get_neutral(rows, add_All_if_empty=True):
    neutral = {l: 0 for l in [i for k in [list(v.keys()) for v in rows] for i in k]}
    if add_All_if_empty and len(neutral.keys()) <= 1:
        neutral = {"All": 0}
    return neutral


def __merge_rows_with_neutral(rows, neutral):
    for i in range(len(rows)):
        rows[i] = {**neutral, **rows[i]}
    return rows

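The neutral dict collects every key seen anywhere in the series and zero-fills it into each bucket, so sparse buckets still plot as zeros. A small made-up example:

rows = [{"timestamp": 0, "a.com": 3}, {"timestamp": 1, "b.com": 2}]
neutral = __get_neutral(rows)                      # {"timestamp": 0, "a.com": 0, "b.com": 0}
rows = __merge_rows_with_neutral(rows, neutral)
# -> [{"timestamp": 0, "a.com": 3, "b.com": 0},
#     {"timestamp": 1, "a.com": 0, "b.com": 2}]
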
def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), density=6, **args):
|
|
||||||
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
|
||||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
|
|
||||||
data=args, main_table="requests", time_column="timestamp", project=False,
|
|
||||||
duration=False)
|
|
||||||
pg_sub_query_subset.append("requests.status_code/100 = %(status_code)s")
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""WITH requests AS (SELECT host, timestamp
|
|
||||||
FROM events_common.requests INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
)
|
|
||||||
SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(JSONB_AGG(requests) FILTER ( WHERE requests IS NOT NULL ), '[]'::JSONB) AS keys
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL ( SELECT requests.host, COUNT(*) AS count
|
|
||||||
FROM requests
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
GROUP BY host
|
|
||||||
ORDER BY count DESC
|
|
||||||
LIMIT 5
|
|
||||||
) AS requests ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp;"""
|
|
||||||
params = {"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp,
|
|
||||||
"step_size": step_size,
|
|
||||||
"status_code": status, **__get_constraint_values(args)}
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
rows = __nested_array_to_dict_array(rows, key="host")
|
|
||||||
neutral = __get_neutral(rows)
|
|
||||||
rows = __merge_rows_with_neutral(rows, neutral)
|
|
||||||
|
|
||||||
return rows
|
|
||||||
|
|
||||||
|
|
||||||
def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=6, **args):
    return __get_domains_errors_4xx_and_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
                                            endTimestamp=endTimestamp, density=density, **args)


def get_domains_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=6, **args):
    return __get_domains_errors_4xx_and_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
                                            endTimestamp=endTimestamp, density=density, **args)

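Both wrappers reuse __get_domains_errors_4xx_and_5xx, which filters by status class with integer division: requests.status_code/100 = 4 matches every 4xx response and = 5 every 5xx. The same check in Python, with invented codes:

for code in (301, 404, 429, 500, 503):
    print(code, "4xx" if code // 100 == 4 else "5xx" if code // 100 == 5 else "other")
# 301 other, 404 4xx, 429 4xx, 500 5xx, 503 5xx
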
def __nested_array_to_dict_array(rows, key="url_host", value="count"):
    for r in rows:
        for i in range(len(r["keys"])):
            r[r["keys"][i][key]] = r["keys"][i][value]
        r.pop("keys")
    return rows

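__nested_array_to_dict_array flattens the JSONB "keys" aggregate returned by the chart queries into one column per host. For example (hosts invented):

rows = [{"timestamp": 0, "keys": [{"host": "a.com", "count": 3}, {"host": "b.com", "count": 1}]}]
rows = __nested_array_to_dict_array(rows, key="host")
# -> [{"timestamp": 0, "a.com": 3, "b.com": 1}]
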
def get_errors_per_domains(project_id, limit, page, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), **args):
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query.append("requests.success = FALSE")
|
|
||||||
params = {"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp,
|
|
||||||
"limit_s": (page - 1) * limit,
|
|
||||||
"limit_e": page * limit,
|
|
||||||
**__get_constraint_values(args)}
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""SELECT COALESCE(SUM(errors_count),0)::INT AS count,
|
|
||||||
COUNT(raw.domain) AS total,
|
|
||||||
jsonb_agg(raw) FILTER ( WHERE rn > %(limit_s)s
|
|
||||||
AND rn <= %(limit_e)s ) AS values
|
|
||||||
FROM (SELECT requests.host AS domain,
|
|
||||||
COUNT(requests.session_id) AS errors_count,
|
|
||||||
row_number() over (ORDER BY COUNT(requests.session_id) DESC ) AS rn
|
|
||||||
FROM events_common.requests
|
|
||||||
INNER JOIN sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query)}
|
|
||||||
GROUP BY requests.host
|
|
||||||
ORDER BY errors_count DESC) AS raw;"""
|
|
||||||
pg_query = cur.mogrify(pg_query, params)
|
|
||||||
logger.debug("-----------")
|
|
||||||
logger.debug(pg_query)
|
|
||||||
logger.debug("-----------")
|
|
||||||
cur.execute(pg_query)
|
|
||||||
row = cur.fetchone()
|
|
||||||
if row:
|
|
||||||
row["values"] = row["values"] or []
|
|
||||||
for r in row["values"]:
|
|
||||||
r.pop("rn")
|
|
||||||
|
|
||||||
return helper.dict_to_camel_case(row)
|
|
||||||
|
|
||||||
|
|
||||||
def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
|
|
||||||
platform=None, density=7, **args):
|
|
||||||
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
|
||||||
|
|
||||||
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query_subset.append("requests.timestamp>=%(startTimestamp)s")
|
|
||||||
pg_sub_query_subset.append("requests.timestamp<%(endTimestamp)s")
|
|
||||||
pg_sub_query_subset.append("requests.status_code > 200")
|
|
||||||
|
|
||||||
pg_sub_query_subset_e = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
|
||||||
time_constraint=False)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
|
||||||
chart=True, data=args, main_table="", time_column="timestamp",
|
|
||||||
project=False, duration=False)
|
|
||||||
pg_sub_query_subset_e.append("timestamp>=%(startTimestamp)s")
|
|
||||||
pg_sub_query_subset_e.append("timestamp<%(endTimestamp)s")
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""WITH requests AS (SELECT status_code AS status, timestamp
|
|
||||||
FROM events_common.requests
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
),
|
|
||||||
errors_integ AS (SELECT timestamp
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
|
||||||
AND source != 'js_exception'
|
|
||||||
),
|
|
||||||
errors_js AS (SELECT timestamp
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
|
||||||
AND source = 'js_exception'
|
|
||||||
)
|
|
||||||
SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(SUM(CASE WHEN status / 100 = 4 THEN 1 ELSE 0 END), 0) AS _4xx,
|
|
||||||
COALESCE(SUM(CASE WHEN status / 100 = 5 THEN 1 ELSE 0 END), 0) AS _5xx,
|
|
||||||
COALESCE((SELECT COUNT(*)
|
|
||||||
FROM errors_js
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
), 0) AS js,
|
|
||||||
COALESCE((SELECT COUNT(*)
|
|
||||||
FROM errors_integ
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
), 0) AS integrations
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT status
|
|
||||||
FROM requests
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS errors_partition ON (TRUE)
|
|
||||||
GROUP BY timestamp
|
|
||||||
ORDER BY timestamp;"""
|
|
||||||
params = {"step_size": step_size,
|
|
||||||
"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
rows = helper.list_to_camel_case(rows)
|
|
||||||
return rows
|
|
||||||
|
|
||||||
|
|
||||||
def get_impacted_sessions_by_js_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), density=7, **args):
|
|
||||||
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
|
||||||
chart=True, data=args)
|
|
||||||
pg_sub_query.append("m_errors.source = 'js_exception'")
|
|
||||||
pg_sub_query.append("m_errors.project_id = %(project_id)s")
|
|
||||||
pg_sub_query.append("errors.timestamp >= %(startTimestamp)s")
|
|
||||||
pg_sub_query.append("errors.timestamp < %(endTimestamp)s")
|
|
||||||
pg_sub_query_chart.append("m_errors.source = 'js_exception'")
|
|
||||||
pg_sub_query_chart.append("m_errors.project_id = %(project_id)s")
|
|
||||||
pg_sub_query_chart.append("errors.timestamp >= generated_timestamp")
|
|
||||||
pg_sub_query_chart.append("errors.timestamp < generated_timestamp+ %(step_size)s")
|
|
||||||
|
|
||||||
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
|
||||||
time_constraint=False)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
|
||||||
chart=True, data=args, main_table="errors", time_column="timestamp",
|
|
||||||
project=False, duration=False)
|
|
||||||
pg_sub_query_subset.append("m_errors.source = 'js_exception'")
|
|
||||||
pg_sub_query_subset.append("errors.timestamp>=%(startTimestamp)s")
|
|
||||||
pg_sub_query_subset.append("errors.timestamp<%(endTimestamp)s")
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""WITH errors AS (SELECT DISTINCT ON (session_id,timestamp) session_id, timestamp
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
)
|
|
||||||
SELECT *
|
|
||||||
FROM (SELECT COUNT(DISTINCT session_id) AS sessions_count
|
|
||||||
FROM errors) AS counts
|
|
||||||
LEFT JOIN
|
|
||||||
(SELECT jsonb_agg(chart) AS chart
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(COUNT(session_id), 0) AS sessions_count
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL ( SELECT DISTINCT session_id
|
|
||||||
FROM errors
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sessions ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
|
||||||
"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp,
|
|
||||||
**__get_constraint_values(args)}))
|
|
||||||
row_sessions = cur.fetchone()
|
|
||||||
pg_query = f"""WITH errors AS ( SELECT DISTINCT ON(errors.error_id,timestamp) errors.error_id,timestamp
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
)
|
|
||||||
SELECT *
|
|
||||||
FROM (SELECT COUNT(DISTINCT errors.error_id) AS errors_count
|
|
||||||
FROM errors) AS counts
|
|
||||||
LEFT JOIN
|
|
||||||
(SELECT jsonb_agg(chart) AS chart
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(COUNT(error_id), 0) AS errors_count
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL ( SELECT DISTINCT errors.error_id
|
|
||||||
FROM errors
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS errors ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
|
||||||
"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp,
|
|
||||||
**__get_constraint_values(args)}))
|
|
||||||
row_errors = cur.fetchone()
|
|
||||||
chart = __merge_charts(row_sessions.pop("chart"), row_errors.pop("chart"))
|
|
||||||
row_sessions = helper.dict_to_camel_case(row_sessions)
|
|
||||||
row_errors = helper.dict_to_camel_case(row_errors)
|
|
||||||
return {**row_sessions, **row_errors, "chart": chart}
|
|
||||||
|
|
||||||
|
|
||||||
def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), density=7, **args):
|
|
||||||
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
|
||||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
|
|
||||||
chart=False, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
|
||||||
chart=True, data=args, main_table="requests", time_column="timestamp",
|
|
||||||
duration=False)
|
|
||||||
pg_sub_query_subset.append("requests.timestamp >= %(startTimestamp)s")
|
|
||||||
pg_sub_query_subset.append("requests.timestamp < %(endTimestamp)s")
|
|
||||||
# pg_sub_query_subset.append("resources.type IN ('fetch', 'script')")
|
|
||||||
pg_sub_query_subset.append("requests.success = FALSE")
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""WITH requests AS (
|
|
||||||
SELECT requests.host, timestamp
|
|
||||||
FROM events_common.requests
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
)
|
|
||||||
SELECT generated_timestamp AS timestamp,
|
|
||||||
SUM(CASE WHEN first.host = sub_requests.host THEN 1 ELSE 0 END) AS first_party,
|
|
||||||
SUM(CASE WHEN first.host != sub_requests.host THEN 1 ELSE 0 END) AS third_party
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN (
|
|
||||||
SELECT requests.host,
|
|
||||||
COUNT(requests.session_id) AS count
|
|
||||||
FROM events_common.requests
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE sessions.project_id = %(project_id)s
  AND sessions.start_ts >= %(startTimestamp)s
  AND sessions.start_ts < %(endTimestamp)s
  AND requests.timestamp >= %(startTimestamp)s
  AND requests.timestamp < %(endTimestamp)s
  AND sessions.duration > 0
GROUP BY requests.host
|
|
||||||
ORDER BY count DESC
|
|
||||||
LIMIT 1
|
|
||||||
) AS first ON (TRUE)
|
|
||||||
LEFT JOIN LATERAL (
|
|
||||||
SELECT requests.host
|
|
||||||
FROM requests
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sub_requests ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp;"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
|
||||||
"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
|
|
||||||
|
|
||||||
rows = cur.fetchall()
|
|
||||||
return rows
|
|
||||||
|
|
||||||
|
|
||||||
def get_user_activity_avg_visited_pages(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), **args):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)
|
|
||||||
results = helper.dict_to_camel_case(row)
|
|
||||||
results["chart"] = __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp,
|
|
||||||
endTimestamp, **args)
|
|
||||||
|
|
||||||
diff = endTimestamp - startTimestamp
|
|
||||||
endTimestamp = startTimestamp
|
|
||||||
startTimestamp = endTimestamp - diff
|
|
||||||
row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)
|
|
||||||
|
|
||||||
previous = helper.dict_to_camel_case(row)
|
|
||||||
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
|
|
||||||
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
def __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args):
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query.append("sessions.pages_count>0")
|
|
||||||
pg_query = f"""SELECT COALESCE(CEIL(AVG(sessions.pages_count)),0) AS value
|
|
||||||
FROM public.sessions
|
|
||||||
WHERE {" AND ".join(pg_sub_query)};"""
|
|
||||||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
|
||||||
**__get_constraint_values(args)}
|
|
||||||
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
row = cur.fetchone()
|
|
||||||
return row
|
|
||||||
|
|
||||||
|
|
||||||
def __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp, endTimestamp, density=20, **args):
|
|
||||||
step_size = __get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
|
|
||||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp}
|
|
||||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
|
|
||||||
chart=False, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
|
||||||
chart=True, data=args, main_table="sessions", time_column="start_ts",
|
|
||||||
duration=False)
|
|
||||||
pg_sub_query_subset.append("sessions.duration IS NOT NULL")
|
|
||||||
|
|
||||||
pg_query = f"""WITH sessions AS(SELECT sessions.pages_count, sessions.start_ts
|
|
||||||
FROM public.sessions
|
|
||||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
|
||||||
)
|
|
||||||
SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(AVG(sessions.pages_count),0) AS value
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (
|
|
||||||
SELECT sessions.pages_count
|
|
||||||
FROM sessions
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sessions ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp;"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
return rows
|
|
||||||
|
|
||||||
|
|
||||||
def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), value=None, density=20, **args):
|
|
||||||
step_size = __get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
|
|
||||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp}
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
|
||||||
chart=True, data=args, main_table="pages", time_column="timestamp",
|
|
||||||
duration=False)
|
|
||||||
|
|
||||||
if value is not None:
|
|
||||||
pg_sub_query.append("pages.path = %(value)s")
|
|
||||||
pg_sub_query_chart.append("pages.path = %(value)s")
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""SELECT COUNT(pages.session_id) AS value
|
|
||||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query)};"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp,
|
|
||||||
"value": value, **__get_constraint_values(args)}))
|
|
||||||
row = cur.fetchone()
|
|
||||||
pg_query = f"""WITH pages AS(SELECT pages.timestamp
|
|
||||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query)}
|
|
||||||
)
|
|
||||||
SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(pages.*) AS value
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (
|
|
||||||
SELECT 1
|
|
||||||
FROM pages
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS pages ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp;"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
row["chart"] = rows
|
|
||||||
row["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
|
||||||
return helper.dict_to_camel_case(row)
|
|
||||||
|
|
||||||
|
|
||||||
def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(),
|
|
||||||
density=7, **args):
|
|
||||||
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
|
||||||
chart=True, data=args)
|
|
||||||
pg_sub_query.append("user_id IS NOT NULL")
|
|
||||||
pg_sub_query.append("user_id != ''")
|
|
||||||
pg_sub_query_chart.append("user_id IS NOT NULL")
|
|
||||||
pg_sub_query_chart.append("user_id != ''")
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""SELECT generated_timestamp AS timestamp,
|
|
||||||
COALESCE(COUNT(sessions), 0) AS value
|
|
||||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL ( SELECT DISTINCT user_id
|
|
||||||
FROM public.sessions
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sessions ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp;"""
|
|
||||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
results = {
|
|
||||||
"value": sum([r["value"] for r in rows]),
|
|
||||||
"chart": rows
|
|
||||||
}
|
|
||||||
|
|
||||||
diff = endTimestamp - startTimestamp
|
|
||||||
endTimestamp = startTimestamp
|
|
||||||
startTimestamp = endTimestamp - diff
|
|
||||||
|
|
||||||
pg_query = f"""SELECT COUNT(DISTINCT sessions.user_id) AS count
|
|
||||||
FROM public.sessions
|
|
||||||
WHERE {" AND ".join(pg_sub_query)};"""
|
|
||||||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
|
||||||
**__get_constraint_values(args)}
|
|
||||||
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
|
|
||||||
count = cur.fetchone()["count"]
|
|
||||||
|
|
||||||
results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
|
|
||||||
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|
||||||
endTimestamp=TimeUTC.now(), **args):
|
|
||||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
|
||||||
pg_sub_query.append("pages.speed_index IS NOT NULL")
|
|
||||||
pg_sub_query.append("pages.speed_index>0")
|
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS value
|
|
||||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query)}
|
|
||||||
GROUP BY sessions.user_country
|
|
||||||
ORDER BY value, sessions.user_country;"""
|
|
||||||
params = {"project_id": project_id,
|
|
||||||
"startTimestamp": startTimestamp,
|
|
||||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
if len(rows) > 0:
|
|
||||||
pg_query = f"""SELECT AVG(pages.speed_index) AS avg
|
|
||||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query)};"""
|
|
||||||
cur.execute(cur.mogrify(pg_query, params))
|
|
||||||
avg = cur.fetchone()["avg"]
|
|
||||||
else:
|
|
||||||
avg = 0
|
|
||||||
return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.MILLISECOND}
|
|
||||||
10
api/chalicelib/core/metrics/__init__.py
Normal file
@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental metrics")
else:
    pass
@ -1,44 +1,19 @@
 import json
 import logging
 
-from decouple import config
 from fastapi import HTTPException, status
 
 import schemas
-from chalicelib.core import funnels, issues, heatmaps, sessions_insights, sessions_mobs, sessions_favorite, \
-    product_analytics, custom_metrics_predefined
+from chalicelib.core import issues
+from chalicelib.core.errors import errors
+from chalicelib.core.metrics import heatmaps, product_analytics, funnels
+from chalicelib.core.sessions import sessions, sessions_search
 from chalicelib.utils import helper, pg_client
 from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.utils.storage import extra
-
-if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
-    logging.info(">>> Using experimental error search")
-    from . import errors_exp as errors
-else:
-    from . import errors as errors
-
-if config("EXP_SESSIONS_SEARCH_METRIC", cast=bool, default=False):
-    from chalicelib.core import sessions
-else:
-    from chalicelib.core import sessions_legacy as sessions
 
 logger = logging.getLogger(__name__)
 
 
-# TODO: refactor this to split
-# timeseries /
-# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
-# remove "table of" calls from this function
-def __try_live(project_id, data: schemas.CardSchema):
-    results = []
-    for i, s in enumerate(data.series):
-        results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
-                                               view_type=data.view_type, metric_type=data.metric_type,
-                                               metric_of=data.metric_of, metric_value=data.metric_value))
-
-    return results
-
-
 def __get_table_of_series(project_id, data: schemas.CardSchema):
     results = []
     for i, s in enumerate(data.series):
@ -56,9 +31,6 @@ def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel
|
||||||
"totalDropDueToIssues": 0
|
"totalDropDueToIssues": 0
|
||||||
}
|
}
|
||||||
|
|
||||||
# return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
|
|
||||||
# data=data.series[0].filter,
|
|
||||||
# metric_format=data.metric_format)
|
|
||||||
return funnels.get_simple_funnel(project=project,
|
return funnels.get_simple_funnel(project=project,
|
||||||
data=data.series[0].filter,
|
data=data.series[0].filter,
|
||||||
metric_format=data.metric_format)
|
metric_format=data.metric_format)
|
||||||
|
|
@ -70,7 +42,7 @@ def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.Ca
|
||||||
"total": 0,
|
"total": 0,
|
||||||
"errors": []
|
"errors": []
|
||||||
}
|
}
|
||||||
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
return errors.search(data.series[0].filter, project=project, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||||
|
|
@ -80,11 +52,11 @@ def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.
|
||||||
"total": 0,
|
"total": 0,
|
||||||
"sessions": []
|
"sessions": []
|
||||||
}
|
}
|
||||||
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
return sessions_search.search_sessions(data=data.series[0].filter, project=project, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
def get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
||||||
include_mobs: bool = True):
|
include_mobs: bool = True):
|
||||||
if len(data.series) == 0:
|
if len(data.series) == 0:
|
||||||
return None
|
return None
|
||||||
data.series[0].filter.filters += data.series[0].filter.events
|
data.series[0].filter.filters += data.series[0].filter.events
|
||||||
|
|
@ -95,15 +67,6 @@ def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas
|
||||||
include_mobs=include_mobs)
|
include_mobs=include_mobs)
|
||||||
|
|
||||||
|
|
||||||
# EE only
|
|
||||||
def __get_insights_chart(project: schemas.ProjectContext, data: schemas.CardInsights, user_id: int = None):
|
|
||||||
return sessions_insights.fetch_selected(project_id=project.project_id,
|
|
||||||
data=schemas.GetInsightsSchema(startTimestamp=data.startTimestamp,
|
|
||||||
endTimestamp=data.endTimestamp,
|
|
||||||
metricValue=data.metric_value,
|
|
||||||
series=data.series))
|
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
|
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
|
||||||
if len(data.series) == 0:
|
if len(data.series) == 0:
|
||||||
data.series.append(
|
data.series.append(
|
||||||
|
|
@ -115,7 +78,12 @@ def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, dat
|
||||||
|
|
||||||
|
|
||||||
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
|
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
|
||||||
series_charts = __try_live(project_id=project.project_id, data=data)
|
series_charts = []
|
||||||
|
for i, s in enumerate(data.series):
|
||||||
|
series_charts.append(sessions.search2_series(data=s.filter, project_id=project.project_id, density=data.density,
|
||||||
|
metric_type=data.metric_type, metric_of=data.metric_of,
|
||||||
|
metric_value=data.metric_value))
|
||||||
|
|
||||||
results = [{}] * len(series_charts[0])
|
results = [{}] * len(series_charts[0])
|
||||||
for i in range(len(results)):
|
for i in range(len(results)):
|
||||||
for j, series_chart in enumerate(series_charts):
|
for j, series_chart in enumerate(series_charts):
|
||||||
|
|
@ -185,40 +153,28 @@ def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable,
|
||||||
|
|
||||||
|
|
||||||
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
|
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
|
||||||
if data.is_predefined:
|
|
||||||
return custom_metrics_predefined.get_metric(key=data.metric_of,
|
|
||||||
project_id=project.project_id,
|
|
||||||
data=data.model_dump())
|
|
||||||
|
|
||||||
supported = {
|
supported = {
|
||||||
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
|
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
|
||||||
schemas.MetricType.TABLE: __get_table_chart,
|
schemas.MetricType.TABLE: __get_table_chart,
|
||||||
schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
|
schemas.MetricType.HEAT_MAP: get_heat_map_chart,
|
||||||
schemas.MetricType.FUNNEL: __get_funnel_chart,
|
schemas.MetricType.FUNNEL: __get_funnel_chart,
|
||||||
schemas.MetricType.INSIGHTS: __get_insights_chart,
|
|
||||||
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
|
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
|
||||||
}
|
}
|
||||||
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
|
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
def get_sessions_by_card_id(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||||
# No need for this because UI is sending the full payload
|
if not card_exists(metric_id=metric_id, project_id=project.project_id, user_id=user_id):
|
||||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
|
||||||
# if card is None:
|
|
||||||
# return None
|
|
||||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
|
||||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
|
||||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
|
||||||
return None
|
return None
|
||||||
results = []
|
results = []
|
||||||
for s in data.series:
|
for s in data.series:
|
||||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
|
||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
|
||||||
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
def get_sessions(project: schemas.ProjectContext, user_id, data: schemas.CardSessionsSchema):
|
||||||
results = []
|
results = []
|
||||||
if len(data.series) == 0:
|
if len(data.series) == 0:
|
||||||
return results
|
return results
|
||||||
|
|
@ -228,31 +184,33 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||||
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
||||||
|
|
||||||
results.append({"seriesId": None, "seriesName": s.name,
|
results.append({"seriesId": None, "seriesName": s.name,
|
||||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
|
||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
|
||||||
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
||||||
if data.is_predefined:
|
|
||||||
return not_supported()
|
|
||||||
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
||||||
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
||||||
supported = {
|
supported = {
|
||||||
schemas.MetricType.TIMESERIES: not_supported,
|
schemas.MetricType.TIMESERIES: not_supported,
|
||||||
schemas.MetricType.TABLE: not_supported,
|
schemas.MetricType.TABLE: not_supported,
|
||||||
schemas.MetricType.HEAT_MAP: not_supported,
|
schemas.MetricType.HEAT_MAP: not_supported,
|
||||||
schemas.MetricType.INSIGHTS: not_supported,
|
|
||||||
schemas.MetricType.PATH_ANALYSIS: not_supported,
|
schemas.MetricType.PATH_ANALYSIS: not_supported,
|
||||||
}
|
}
|
||||||
return supported.get(data.metric_type, not_supported)()
|
return supported.get(data.metric_type, not_supported)()
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
def get_global_card_info(data: schemas.CardSchema):
|
||||||
|
r = {"hideExcess": data.hide_excess, "compareTo": data.compare_to, "rows": data.rows}
|
||||||
|
return r
|
||||||
|
|
||||||
|
|
||||||
|
def get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
||||||
r = {"start_point": [s.model_dump() for s in data.start_point],
|
r = {"start_point": [s.model_dump() for s in data.start_point],
|
||||||
"start_type": data.start_type,
|
"start_type": data.start_type,
|
||||||
"excludes": [e.model_dump() for e in data.excludes],
|
"excludes": [e.model_dump() for e in data.excludes],
|
||||||
"hideExcess": data.hide_excess}
|
"rows": data.rows}
|
||||||
return r
|
return r
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -263,25 +221,11 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
|
||||||
if data.session_id is not None:
|
if data.session_id is not None:
|
||||||
session_data = {"sessionId": data.session_id}
|
session_data = {"sessionId": data.session_id}
|
||||||
else:
|
else:
|
||||||
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
session_data = get_heat_map_chart(project=project, user_id=user_id,
|
||||||
data=data, include_mobs=False)
|
data=data, include_mobs=False)
|
||||||
if session_data is not None:
|
if session_data is not None:
|
||||||
session_data = {"sessionId": session_data["sessionId"]}
|
session_data = {"sessionId": session_data["sessionId"]}
|
||||||
|
|
||||||
if session_data is not None:
|
|
||||||
# for EE only
|
|
||||||
keys = sessions_mobs. \
|
|
||||||
__get_mob_keys(project_id=project.project_id, session_id=session_data["sessionId"])
|
|
||||||
keys += sessions_mobs. \
|
|
||||||
__get_mob_keys_deprecated(session_id=session_data["sessionId"]) # To support old sessions
|
|
||||||
tag = config('RETENTION_L_VALUE', default='vault')
|
|
||||||
for k in keys:
|
|
||||||
try:
|
|
||||||
extra.tag_session(file_key=k, tag_value=tag)
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
|
||||||
logger.error(str(e))
|
|
||||||
|
|
||||||
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
||||||
for i, s in enumerate(data.series):
|
for i, s in enumerate(data.series):
|
||||||
for k in s.model_dump().keys():
|
for k in s.model_dump().keys():
|
||||||
|
|
@ -291,8 +235,10 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
|
||||||
series_len = len(data.series)
|
series_len = len(data.series)
|
||||||
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
||||||
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
||||||
|
params["card_info"] = get_global_card_info(data=data)
|
||||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
|
||||||
|
params["card_info"] = json.dumps(params["card_info"])
|
||||||
|
|
||||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||||
view_type, metric_type, metric_of, metric_value,
|
view_type, metric_type, metric_of, metric_value,
|
||||||
|
|
@ -352,16 +298,18 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
||||||
if i not in u_series_ids:
|
if i not in u_series_ids:
|
||||||
d_series_ids.append(i)
|
d_series_ids.append(i)
|
||||||
params["d_series_ids"] = tuple(d_series_ids)
|
params["d_series_ids"] = tuple(d_series_ids)
|
||||||
params["card_info"] = None
|
|
||||||
params["session_data"] = json.dumps(metric["data"])
|
params["session_data"] = json.dumps(metric["data"])
|
||||||
|
params["card_info"] = get_global_card_info(data=data)
|
||||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
|
||||||
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||||
if data.session_id is not None:
|
if data.session_id is not None:
|
||||||
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
||||||
elif metric.get("data") and metric["data"].get("sessionId"):
|
elif metric.get("data") and metric["data"].get("sessionId"):
|
||||||
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
|
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
|
||||||
|
|
||||||
|
params["card_info"] = json.dumps(params["card_info"])
|
||||||
|
|
||||||
with pg_client.PostgresClient() as cur:
|
with pg_client.PostgresClient() as cur:
|
||||||
sub_queries = []
|
sub_queries = []
|
||||||
if len(n_series) > 0:
|
if len(n_series) > 0:
|
||||||
|
|
@ -404,6 +352,100 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
||||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
|
||||||
|
constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
|
||||||
|
params = {
|
||||||
|
"project_id": project_id,
|
||||||
|
"user_id": user_id,
|
||||||
|
"offset": (data.page - 1) * data.limit,
|
||||||
|
"limit": data.limit,
|
||||||
|
}
|
||||||
|
if data.mine_only:
|
||||||
|
constraints.append("user_id = %(user_id)s")
|
||||||
|
else:
|
||||||
|
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
||||||
|
if data.shared_only:
|
||||||
|
constraints.append("is_public")
|
||||||
|
|
||||||
|
if data.filter is not None:
|
||||||
|
if data.filter.type:
|
||||||
|
constraints.append("metrics.metric_type = %(filter_type)s")
|
||||||
|
params["filter_type"] = data.filter.type
|
||||||
|
if data.filter.query and len(data.filter.query) > 0:
|
||||||
|
constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
|
||||||
|
params["filter_query"] = helper.values_for_operator(
|
||||||
|
value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
|
||||||
|
)
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
sub_join = ""
|
||||||
|
if include_series:
|
||||||
|
sub_join = """LEFT JOIN LATERAL (
|
||||||
|
SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||||
|
FROM metric_series
|
||||||
|
WHERE metric_series.metric_id = metrics.metric_id
|
||||||
|
AND metric_series.deleted_at ISNULL
|
||||||
|
) AS metric_series ON (TRUE)"""
|
||||||
|
|
||||||
|
sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
|
||||||
|
else "created_at"
|
||||||
|
# change ascend to asc and descend to desc
|
||||||
|
sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
|
||||||
|
if sort_order == "ascend":
|
||||||
|
sort_order = "asc"
|
||||||
|
elif sort_order == "descend":
|
||||||
|
sort_order = "desc"
|
||||||
|
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
||||||
|
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
||||||
|
dashboards, owner_email, owner_name, default_config AS config, thumbnail
|
||||||
|
FROM metrics
|
||||||
|
{sub_join}
|
||||||
|
LEFT JOIN LATERAL (
|
||||||
|
SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name),'[]'::jsonb) AS dashboards
|
||||||
|
FROM (
|
||||||
|
SELECT DISTINCT dashboard_id, name, is_public
|
||||||
|
FROM dashboards
|
||||||
|
INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND dashboard_widgets.metric_id = metrics.metric_id
|
||||||
|
AND project_id = %(project_id)s
|
||||||
|
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||||
|
) AS connected_dashboards
|
||||||
|
) AS connected_dashboards ON (TRUE)
|
||||||
|
LEFT JOIN LATERAL (
|
||||||
|
SELECT email AS owner_email, name AS owner_name
|
||||||
|
FROM users
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND users.user_id = metrics.user_id
|
||||||
|
) AS owner ON (TRUE)
|
||||||
|
WHERE {" AND ".join(constraints)}
|
||||||
|
ORDER BY {sort_column} {sort_order}
|
||||||
|
LIMIT %(limit)s OFFSET %(offset)s;""",
|
||||||
|
params
|
||||||
|
)
|
||||||
|
cur.execute(query)
|
||||||
|
rows = cur.fetchall()
|
||||||
|
if len(rows) > 0:
|
||||||
|
total = rows[0]["total"]
|
||||||
|
if include_series:
|
||||||
|
for r in rows:
|
||||||
|
r.pop("total")
|
||||||
|
for s in r.get("series", []):
|
||||||
|
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||||
|
else:
|
||||||
|
for r in rows:
|
||||||
|
r.pop("total")
|
||||||
|
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
||||||
|
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
||||||
|
rows = helper.list_to_camel_case(rows)
|
||||||
|
else:
|
||||||
|
total = 0
|
||||||
|
|
||||||
|
return {"total": total, "list": rows}
|
||||||
|
|
||||||
|
|
||||||
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
||||||
constraints = ["metrics.project_id = %(project_id)s",
|
constraints = ["metrics.project_id = %(project_id)s",
|
||||||
"metrics.deleted_at ISNULL"]
|
"metrics.deleted_at ISNULL"]
|
||||||
|
|
@ -492,26 +534,20 @@ def delete_card(project_id, metric_id, user_id):
|
||||||
RETURNING data;""",
|
RETURNING data;""",
|
||||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
||||||
)
|
)
|
||||||
# for EE only
|
|
||||||
row = cur.fetchone()
|
|
||||||
if row:
|
|
||||||
if row["data"] and not sessions_favorite.favorite_session_exists(session_id=row["data"]["sessionId"]):
|
|
||||||
keys = sessions_mobs. \
|
|
||||||
__get_mob_keys(project_id=project_id, session_id=row["data"]["sessionId"])
|
|
||||||
keys += sessions_mobs. \
|
|
||||||
__get_mob_keys_deprecated(session_id=row["data"]["sessionId"]) # To support old sessions
|
|
||||||
tag = config('RETENTION_D_VALUE', default='default')
|
|
||||||
for k in keys:
|
|
||||||
try:
|
|
||||||
extra.tag_session(file_key=k, tag_value=tag)
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
|
||||||
logger.error(str(e))
|
|
||||||
return {"state": "success"}
|
return {"state": "success"}
|
||||||
|
|
||||||
|
|
||||||
|
def __get_global_attributes(row):
|
||||||
|
if row is None or row.get("cardInfo") is None:
|
||||||
|
return row
|
||||||
|
card_info = row.get("cardInfo", {})
|
||||||
|
row["compareTo"] = card_info["compareTo"] if card_info.get("compareTo") is not None else []
|
||||||
|
return row
|
||||||
|
|
||||||
|
|
||||||
def __get_path_analysis_attributes(row):
|
def __get_path_analysis_attributes(row):
|
||||||
card_info = row.pop("cardInfo")
|
card_info = row.get("cardInfo", {})
|
||||||
row["excludes"] = card_info.get("excludes", [])
|
row["excludes"] = card_info.get("excludes", [])
|
||||||
row["startPoint"] = card_info.get("startPoint", [])
|
row["startPoint"] = card_info.get("startPoint", [])
|
||||||
row["startType"] = card_info.get("startType", "start")
|
row["startType"] = card_info.get("startType", "start")
|
||||||
|
|
@ -564,6 +600,8 @@ def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data:
|
||||||
row = helper.dict_to_camel_case(row)
|
row = helper.dict_to_camel_case(row)
|
||||||
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
|
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
|
||||||
row = __get_path_analysis_attributes(row=row)
|
row = __get_path_analysis_attributes(row=row)
|
||||||
|
row = __get_global_attributes(row=row)
|
||||||
|
row.pop("cardInfo")
|
||||||
return row
|
return row
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -605,17 +643,7 @@ def change_state(project_id, metric_id, user_id, status):
|
||||||
|
|
||||||
|
|
||||||
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
||||||
data: schemas.CardSessionsSchema
|
data: schemas.CardSessionsSchema):
|
||||||
# , range_value=None, start_date=None, end_date=None
|
|
||||||
):
|
|
||||||
# No need for this because UI is sending the full payload
|
|
||||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
|
||||||
# if card is None:
|
|
||||||
# return None
|
|
||||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
|
||||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
|
||||||
# if metric is None:
|
|
||||||
# return None
|
|
||||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||||
return None
|
return None
|
||||||
for s in data.series:
|
for s in data.series:
|
||||||
|
|
@ -657,11 +685,7 @@ def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, da
|
||||||
raw_metric["density"] = data.density
|
raw_metric["density"] = data.density
|
||||||
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
||||||
|
|
||||||
if metric.is_predefined:
|
if metric.metric_type == schemas.MetricType.HEAT_MAP:
|
||||||
return custom_metrics_predefined.get_metric(key=metric.metric_of,
|
|
||||||
project_id=project.project_id,
|
|
||||||
data=data.model_dump())
|
|
||||||
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
|
|
||||||
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
||||||
return heatmaps.get_selected_session(project_id=project.project_id,
|
return heatmaps.get_selected_session(project_id=project.project_id,
|
||||||
session_id=raw_metric["data"]["sessionId"])
|
session_id=raw_metric["data"]["sessionId"])
|
||||||
|
|
@ -1,7 +1,7 @@
 import json
 
 import schemas
-from chalicelib.core import custom_metrics
+from chalicelib.core.metrics import custom_metrics
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC
@@ -1,7 +1,7 @@
 from typing import List

 import schemas
-from chalicelib.core import significance
+from chalicelib.core.metrics.modules import significance
 from chalicelib.utils import helper
 from chalicelib.utils import sql_helper as sh
11  api/chalicelib/core/metrics/heatmaps/__init__.py  Normal file
@@ -0,0 +1,11 @@
+import logging
+
+from decouple import config
+
+logger = logging.getLogger(__name__)
+
+if config("EXP_METRICS", cast=bool, default=False):
+    logger.info(">>> Using experimental heatmaps")
+    from .heatmaps_ch import *
+else:
+    from .heatmaps import *
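The new package `__init__` decides between the ClickHouse and PostgreSQL implementations once, at import time, based on the `EXP_METRICS` setting. A hedged, self-contained illustration of the toggle (decouple falls back to the OS environment when no settings file provides the key):

```python
# Hedged illustration: exporting EXP_METRICS=true before the API starts flips
# every package that uses this pattern to its experimental implementation.
import os

from decouple import config

os.environ.setdefault("EXP_METRICS", "false")
use_experimental = config("EXP_METRICS", cast=bool, default=False)
print("experimental heatmaps" if use_experimental else "stable heatmaps")
```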
@@ -1,7 +1,8 @@
 import logging

 import schemas
-from chalicelib.core import sessions_mobs, sessions
+from chalicelib.core import sessions
+from chalicelib.core.sessions import sessions_mobs
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils import sql_helper as sh
385  api/chalicelib/core/metrics/heatmaps/heatmaps_ch.py  Normal file
@@ -0,0 +1,385 @@
|
import logging
|
||||||
|
|
||||||
|
from decouple import config
|
||||||
|
|
||||||
|
import schemas
|
||||||
|
from chalicelib.core import events
|
||||||
|
from chalicelib.core.metrics.modules import sessions, sessions_mobs
|
||||||
|
from chalicelib.utils import sql_helper as sh
|
||||||
|
|
||||||
|
from chalicelib.utils import pg_client, helper, ch_client, exp_ch_helper
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
|
||||||
|
if data.url is None or data.url == "":
|
||||||
|
return []
|
||||||
|
args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
|
||||||
|
"project_id": project_id, "url": data.url}
|
||||||
|
constraints = [
|
||||||
|
"main_events.project_id = toUInt16(%(project_id)s)",
|
||||||
|
"main_events.created_at >= toDateTime(%(startDate)s / 1000)",
|
||||||
|
"main_events.created_at <= toDateTime(%(endDate)s / 1000)",
|
||||||
|
"main_events.`$event_name` = 'CLICK'",
|
||||||
|
"isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
|
||||||
|
]
|
||||||
|
|
||||||
|
if data.operator == schemas.SearchEventOperator.IS:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
|
||||||
|
else:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
|
||||||
|
args["url"] = helper.values_for_operator(data.url, data.operator)
|
||||||
|
|
||||||
|
query_from = f"{exp_ch_helper.get_main_events_table(data.startTimestamp)} AS main_events"
|
||||||
|
# TODO: is this used ?
|
||||||
|
# has_click_rage_filter = False
|
||||||
|
# if len(data.filters) > 0:
|
||||||
|
# for i, f in enumerate(data.filters):
|
||||||
|
# if f.type == schemas.FilterType.issue and len(f.value) > 0:
|
||||||
|
# has_click_rage_filter = True
|
||||||
|
# query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
|
||||||
|
# INNER JOIN issues AS mis USING (issue_id)
|
||||||
|
# INNER JOIN LATERAL (
|
||||||
|
# SELECT COUNT(1) AS real_count
|
||||||
|
# FROM events.clicks AS sc
|
||||||
|
# INNER JOIN sessions as ss USING (session_id)
|
||||||
|
# WHERE ss.project_id = 2
|
||||||
|
# AND (sc.url = %(url)s OR sc.path = %(url)s)
|
||||||
|
# AND sc.timestamp >= %(startDate)s
|
||||||
|
# AND sc.timestamp <= %(endDate)s
|
||||||
|
# AND ss.start_ts >= %(startDate)s
|
||||||
|
# AND ss.start_ts <= %(endDate)s
|
||||||
|
# AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
|
||||||
|
# constraints += ["mis.project_id = %(project_id)s",
|
||||||
|
# "issues.timestamp >= %(startDate)s",
|
||||||
|
# "issues.timestamp <= %(endDate)s"]
|
||||||
|
# f_k = f"issue_value{i}"
|
||||||
|
# args = {**args, **sh.multi_values(f.value, value_key=f_k)}
|
||||||
|
# constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
|
||||||
|
# f.value, value_key=f_k))
|
||||||
|
# constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
|
||||||
|
# f.value, value_key=f_k))
|
||||||
|
# TODO: change this once click-rage is fixed
|
||||||
|
# if data.click_rage and not has_click_rage_filter:
|
||||||
|
# constraints.append("""(issues_t.session_id IS NULL
|
||||||
|
# OR (issues_t.datetime >= toDateTime(%(startDate)s/1000)
|
||||||
|
# AND issues_t.datetime <= toDateTime(%(endDate)s/1000)
|
||||||
|
# AND issues_t.project_id = toUInt16(%(project_id)s)
|
||||||
|
# AND issues_t.event_type = 'ISSUE'
|
||||||
|
# AND issues_t.project_id = toUInt16(%(project_id)s)
|
||||||
|
# AND mis.project_id = toUInt16(%(project_id)s)
|
||||||
|
# AND mis.type='click_rage'))""")
|
||||||
|
# query_from += """ LEFT JOIN experimental.events AS issues_t ON (main_events.session_id=issues_t.session_id)
|
||||||
|
# LEFT JOIN experimental.issues AS mis ON (issues_t.issue_id=mis.issue_id)"""
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
query = cur.format(query=f"""SELECT
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.normalized_x') AS normalized_x,
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.normalized_y') AS normalized_y
|
||||||
|
FROM {query_from}
|
||||||
|
WHERE {" AND ".join(constraints)}
|
||||||
|
LIMIT 500;""",
|
||||||
|
parameters=args)
|
||||||
|
logger.debug("---------")
|
||||||
|
logger.debug(query)
|
||||||
|
logger.debug("---------")
|
||||||
|
try:
|
||||||
|
rows = cur.execute(query=query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- HEATMAP 2 SEARCH QUERY EXCEPTION CH -----------")
|
||||||
|
logger.warning(query)
|
||||||
|
logger.warning("--------- PAYLOAD -----------")
|
||||||
|
logger.warning(data)
|
||||||
|
logger.warning("--------------------")
|
||||||
|
raise err
|
||||||
|
|
||||||
|
return helper.list_to_camel_case(rows)
|
||||||
|
|
||||||
|
|
||||||
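The URL filter in `get_by_url` switches between an exact `=` match and an `ILIKE` pattern depending on the search operator, and binds the (possibly wildcarded) value separately from the SQL text. A hedged, self-contained sketch of that idea; `url_predicate` mimics, but is not, the repo's `helper.values_for_operator` path:

```python
# Hedged sketch of the predicate/value split used above; the 'contains' wildcard
# semantics are an assumption.
def url_predicate(url: str, operator: str) -> tuple[str, str]:
    column = "JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path')"
    if operator == "is":
        return f"{column} = %(url)s", url
    return f"{column} ILIKE %(url)s", f"%{url}%"

predicate, bound_value = url_predicate("/checkout", "contains")
print(predicate, "->", bound_value)
```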
|
def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
|
||||||
|
args = {"project_id": project_id, "session_id": session_id, "url": data.url}
|
||||||
|
constraints = [
|
||||||
|
"main_events.project_id = toUInt16(%(project_id)s)",
|
||||||
|
"main_events.session_id = %(session_id)s",
|
||||||
|
"main_events.`$event_name`='CLICK'",
|
||||||
|
"isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
|
||||||
|
]
|
||||||
|
if data.operator == schemas.SearchEventOperator.IS:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
|
||||||
|
else:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
|
||||||
|
args["url"] = helper.values_for_operator(data.url, data.operator)
|
||||||
|
|
||||||
|
query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"
|
||||||
|
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
query = cur.format(query=f"""SELECT main_events.normalized_x AS normalized_x,
|
||||||
|
main_events.normalized_y AS normalized_y
|
||||||
|
FROM {query_from}
|
||||||
|
WHERE {" AND ".join(constraints)};""",
|
||||||
|
parameters=args)
|
||||||
|
logger.debug("---------")
|
||||||
|
logger.debug(query)
|
||||||
|
logger.debug("---------")
|
||||||
|
try:
|
||||||
|
rows = cur.execute(query=query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
|
||||||
|
logger.warning(query)
|
||||||
|
logger.warning("--------- PAYLOAD -----------")
|
||||||
|
logger.warning(data)
|
||||||
|
logger.warning("--------------------")
|
||||||
|
raise err
|
||||||
|
|
||||||
|
return helper.list_to_camel_case(rows)
|
||||||
|
|
||||||
|
|
||||||
|
def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
|
||||||
|
args = {"project_id": project_id, "session_id": session_id, "url": data.url}
|
||||||
|
constraints = ["main_events.project_id = toUInt16(%(project_id)s)",
|
||||||
|
"main_events.session_id = %(session_id)s",
|
||||||
|
"main_events.`$event_name`='CLICK'"]
|
||||||
|
|
||||||
|
if data.operator == schemas.SearchEventOperator.IS:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
|
||||||
|
else:
|
||||||
|
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
|
||||||
|
args["url"] = helper.values_for_operator(data.url, data.operator)
|
||||||
|
|
||||||
|
query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"
|
||||||
|
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
query = cur.format(query=f"""SELECT CAST(`$properties`.selector AS String) AS selector,
|
||||||
|
COUNT(1) AS count
|
||||||
|
FROM {query_from}
|
||||||
|
WHERE {" AND ".join(constraints)}
|
||||||
|
GROUP BY 1
|
||||||
|
ORDER BY count DESC;""",
|
||||||
|
parameters=args)
|
||||||
|
logger.debug("---------")
|
||||||
|
logger.debug(query)
|
||||||
|
logger.debug("---------")
|
||||||
|
try:
|
||||||
|
rows = cur.execute(query=query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
|
||||||
|
logger.warning(query)
|
||||||
|
logger.warning("--------- PAYLOAD -----------")
|
||||||
|
logger.warning(data)
|
||||||
|
logger.warning("--------------------")
|
||||||
|
raise err
|
||||||
|
|
||||||
|
return helper.list_to_camel_case(rows)
|
||||||
|
|
||||||
|
|
||||||
|
# use CH
|
||||||
|
SESSION_PROJECTION_COLS = """s.project_id,
|
||||||
|
s.session_id AS session_id,
|
||||||
|
toUnixTimestamp(s.datetime)*1000 AS start_ts,
|
||||||
|
s.duration AS duration"""
|
||||||
|
|
||||||
|
|
||||||
|
def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
|
||||||
|
start_time: int,
|
||||||
|
end_time: int) -> str | None:
|
||||||
|
full_args = {
|
||||||
|
"sessionId": session_id,
|
||||||
|
"projectId": project_id,
|
||||||
|
"start_time": start_time,
|
||||||
|
"end_time": end_time,
|
||||||
|
}
|
||||||
|
sub_condition = ["session_id = %(sessionId)s", "`$event_name` = 'CLICK'", "project_id = %(projectId)s"]
|
||||||
|
if location_condition and len(location_condition.value) > 0:
|
||||||
|
f_k = "LOC"
|
||||||
|
op = sh.get_sql_operator(location_condition.operator)
|
||||||
|
full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
|
||||||
|
sub_condition.append(
|
||||||
|
sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
|
||||||
|
value_key=f_k))
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
main_query = cur.format(query=f"""WITH paths AS (
|
||||||
|
SELECT DISTINCT
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url_path
|
||||||
|
FROM product_analytics.events
|
||||||
|
WHERE {" AND ".join(sub_condition)}
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
paths.url_path,
|
||||||
|
COUNT(*) AS count
|
||||||
|
FROM product_analytics.events
|
||||||
|
INNER JOIN paths
|
||||||
|
ON JSON_VALUE(CAST(product_analytics.events.$properties AS String), '$.url_path') = paths.url_path
|
||||||
|
WHERE `$event_name` = 'CLICK'
|
||||||
|
AND project_id = %(projectId)s
|
||||||
|
AND created_at >= toDateTime(%(start_time)s / 1000)
|
||||||
|
AND created_at <= toDateTime(%(end_time)s / 1000)
|
||||||
|
GROUP BY paths.url_path
|
||||||
|
ORDER BY count DESC
|
||||||
|
LIMIT 1;""",
|
||||||
|
parameters=full_args)
|
||||||
|
logger.debug("--------------------")
|
||||||
|
logger.debug(main_query)
|
||||||
|
logger.debug("--------------------")
|
||||||
|
try:
|
||||||
|
url = cur.execute(query=main_query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION CH-----------")
|
||||||
|
logger.warning(main_query.decode('UTF-8'))
|
||||||
|
logger.warning("--------- PAYLOAD -----------")
|
||||||
|
logger.warning(full_args)
|
||||||
|
logger.warning("--------------------")
|
||||||
|
raise err
|
||||||
|
|
||||||
|
if url is None or len(url) == 0:
|
||||||
|
return None
|
||||||
|
return url[0]["url_path"]
|
||||||
|
|
||||||
|
|
||||||
|
def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
|
||||||
|
include_mobs: bool = True, exclude_sessions: list[str] = [],
|
||||||
|
_depth: int = 3):
|
||||||
|
no_platform = True
|
||||||
|
location_condition = None
|
||||||
|
no_click = True
|
||||||
|
for f in data.filters:
|
||||||
|
if f.type == schemas.FilterType.PLATFORM:
|
||||||
|
no_platform = False
|
||||||
|
break
|
||||||
|
for f in data.events:
|
||||||
|
if f.type == schemas.EventType.LOCATION:
|
||||||
|
if len(f.value) == 0:
|
||||||
|
f.operator = schemas.SearchEventOperator.IS_ANY
|
||||||
|
location_condition = f.model_copy()
|
||||||
|
elif f.type == schemas.EventType.CLICK:
|
||||||
|
no_click = False
|
||||||
|
if len(f.value) == 0:
|
||||||
|
f.operator = schemas.SearchEventOperator.IS_ANY
|
||||||
|
if location_condition and not no_click:
|
||||||
|
break
|
||||||
|
|
||||||
|
if no_platform:
|
||||||
|
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
|
||||||
|
value=[schemas.PlatformType.DESKTOP],
|
||||||
|
operator=schemas.SearchEventOperator.IS))
|
||||||
|
if not location_condition:
|
||||||
|
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
|
||||||
|
value=[],
|
||||||
|
operator=schemas.SearchEventOperator.IS_ANY))
|
||||||
|
if no_click:
|
||||||
|
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
|
||||||
|
value=[],
|
||||||
|
operator=schemas.SearchEventOperator.IS_ANY))
|
||||||
|
|
||||||
|
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
|
||||||
|
value=[0],
|
||||||
|
operator=schemas.MathOperator.GREATER))
|
||||||
|
|
||||||
|
full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
|
||||||
|
favorite_only=data.bookmarked, issue=None,
|
||||||
|
project_id=project_id, user_id=user_id)
|
||||||
|
full_args["exclude_sessions"] = tuple(exclude_sessions)
|
||||||
|
if len(exclude_sessions) > 0:
|
||||||
|
query_part += "\n AND session_id NOT IN (%(exclude_sessions)s)"
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
data.order = schemas.SortOrderType.DESC
|
||||||
|
data.sort = 'duration'
|
||||||
|
main_query = cur.format(query=f"""SELECT *
|
||||||
|
FROM (SELECT {SESSION_PROJECTION_COLS}
|
||||||
|
{query_part}
|
||||||
|
-- ORDER BY {data.sort} {data.order.value}
|
||||||
|
LIMIT 20) AS raw
|
||||||
|
ORDER BY rand()
|
||||||
|
LIMIT 1;""",
|
||||||
|
parameters=full_args)
|
||||||
|
logger.debug("--------------------")
|
||||||
|
logger.debug(main_query)
|
||||||
|
logger.debug("--------------------")
|
||||||
|
try:
|
||||||
|
session = cur.execute(query=main_query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION CH -----------")
|
||||||
|
logger.warning(main_query)
|
||||||
|
logger.warning("--------- PAYLOAD -----------")
|
||||||
|
logger.warning(data.model_dump_json())
|
||||||
|
logger.warning("--------------------")
|
||||||
|
raise err
|
||||||
|
|
||||||
|
if len(session) > 0:
|
||||||
|
session = session[0]
|
||||||
|
if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
|
||||||
|
session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
|
||||||
|
location_condition=location_condition,
|
||||||
|
start_time=data.startTimestamp, end_time=data.endTimestamp)
|
||||||
|
else:
|
||||||
|
session["path"] = location_condition.value[0]
|
||||||
|
|
||||||
|
if include_mobs:
|
||||||
|
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
|
||||||
|
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
|
||||||
|
if _depth > 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
|
||||||
|
return search_short_session(data=data, project_id=project_id, user_id=user_id,
|
||||||
|
include_mobs=include_mobs,
|
||||||
|
exclude_sessions=exclude_sessions + [session["session_id"]],
|
||||||
|
_depth=_depth - 1)
|
||||||
|
elif _depth == 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
|
||||||
|
logger.info("couldn't find an existing replay after 3 iterations for heatmap")
|
||||||
|
|
||||||
|
session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
|
||||||
|
event_type=schemas.EventType.LOCATION)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return helper.dict_to_camel_case(session)
|
||||||
|
|
||||||
|
|
||||||
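`search_short_session` above retries when the randomly chosen session has no replay files, excluding it and decrementing a depth counter until it gives up. A hedged, generic restatement of that retry loop; `find_candidate` and `has_replay_files` are hypothetical stand-ins for the ClickHouse search and the `sessions_mobs` lookups:

```python
# Hedged sketch of the retry-with-exclusions loop; not the repo implementation.
def pick_session_with_replay(find_candidate, has_replay_files,
                             exclude: list[str] | None = None, depth: int = 3):
    exclude = exclude or []
    session = find_candidate(exclude)
    if session is None:
        return None
    if has_replay_files(session):
        return session
    if depth > 0:
        return pick_session_with_replay(find_candidate, has_replay_files,
                                        exclude + [session], depth - 1)
    return None  # mirrors the "couldn't find an existing replay" log above
```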
|
def get_selected_session(project_id, session_id):
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
main_query = cur.format(query=f"""SELECT {SESSION_PROJECTION_COLS}
|
||||||
|
FROM experimental.sessions AS s
|
||||||
|
WHERE session_id=%(session_id)s;""",
|
||||||
|
parameters={"session_id": session_id})
|
||||||
|
logger.debug("--------------------")
|
||||||
|
logger.debug(main_query)
|
||||||
|
logger.debug("--------------------")
|
||||||
|
try:
|
||||||
|
session = cur.execute(query=main_query)
|
||||||
|
except Exception as err:
|
||||||
|
logger.warning("--------- CLICK MAP GET SELECTED SESSION QUERY EXCEPTION -----------")
|
||||||
|
logger.warning(main_query.decode('UTF-8'))
|
||||||
|
raise err
|
||||||
|
if len(session) > 0:
|
||||||
|
session = session[0]
|
||||||
|
else:
|
||||||
|
session = None
|
||||||
|
|
||||||
|
if session:
|
||||||
|
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
|
||||||
|
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
|
||||||
|
if len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
|
||||||
|
session["_issue"] = "mob file not found"
|
||||||
|
logger.info("can't find selected mob file for heatmap")
|
||||||
|
session['events'] = get_page_events(session_id=session["session_id"], project_id=project_id)
|
||||||
|
|
||||||
|
return helper.dict_to_camel_case(session)
|
||||||
|
|
||||||
|
|
||||||
|
def get_page_events(session_id, project_id):
|
||||||
|
with ch_client.ClickHouseClient() as cur:
|
||||||
|
query = cur.format(query=f"""SELECT
|
||||||
|
event_id as message_id,
|
||||||
|
toUnixTimestamp(created_at)*1000 AS timestamp,
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.url_host') AS host,
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS path,
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS value,
|
||||||
|
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url,
|
||||||
|
'LOCATION' AS type
|
||||||
|
FROM product_analytics.events
|
||||||
|
WHERE session_id = %(session_id)s
|
||||||
|
AND `$event_name`='LOCATION'
|
||||||
|
AND project_id= %(project_id)s
|
||||||
|
ORDER BY created_at,message_id;""",
|
||||||
|
parameters={"session_id": session_id, "project_id": project_id})
|
||||||
|
|
||||||
|
rows = cur.execute(query=query)
|
||||||
|
rows = helper.list_to_camel_case(rows)
|
||||||
|
return rows
|
||||||
12  api/chalicelib/core/metrics/modules/__init__.py  Normal file
@@ -0,0 +1,12 @@
+import logging
+
+from decouple import config
+
+logger = logging.getLogger(__name__)
+
+if config("EXP_METRICS", cast=bool, default=False):
+    import chalicelib.core.sessions.sessions_ch as sessions
+else:
+    import chalicelib.core.sessions.sessions_pg as sessions
+
+from chalicelib.core.sessions import sessions_mobs
10  api/chalicelib/core/metrics/modules/significance/__init__.py  Normal file
@@ -0,0 +1,10 @@
+import logging
+
+from decouple import config
+
+logger = logging.getLogger(__name__)
+
+from .significance import *
+
+if config("EXP_METRICS", cast=bool, default=False):
+    from .significance_ch import *
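Because `from .significance import *` always runs and `from .significance_ch import *` runs only when `EXP_METRICS` is on, any name defined in both modules resolves to the ClickHouse version in experimental mode while PG-only helpers remain importable. A hedged, self-contained illustration of that override order (module names hypothetical; `vars()` is only an approximation of star-import rules):

```python
# Hedged illustration: the second star import wins for names defined in both modules.
import types

pg = types.ModuleType("demo_pg")
pg.get_simple_funnel = lambda: "pg"
ch = types.ModuleType("demo_ch")
ch.get_simple_funnel = lambda: "ch"

namespace = {}
namespace.update(vars(pg))  # from .significance import *
namespace.update(vars(ch))  # from .significance_ch import *  (EXP_METRICS on)
assert namespace["get_simple_funnel"]() == "ch"
```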
@@ -1,20 +1,15 @@
 import logging

-import schemas
-from chalicelib.core import events, metadata
-from chalicelib.utils import sql_helper as sh
-
-"""
-todo: remove LIMIT from the query
-"""
-
-from typing import List
 import math
 import warnings
 from collections import defaultdict
+from typing import List

 from psycopg2.extras import RealDictRow

+import schemas
+from chalicelib.core import events, metadata
 from chalicelib.utils import pg_client, helper
+from chalicelib.utils import sql_helper as sh

 logger = logging.getLogger(__name__)
 SIGNIFICANCE_THRSH = 0.4
@@ -765,30 +760,6 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
     return n_critical_issues, issues_dict, total_drop_due_to_issues


-def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id,
-                     metric_format: schemas.MetricExtendedFormatType):
-    output = []
-    stages = filter_d.events
-
-    if len(stages) == 0:
-        logger.debug("no stages found")
-        return output, 0
-
-    # The result of the multi-stage query
-    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
-    # Obtain the first part of the output
-    stages_list = get_stages(stages, rows, metric_format=metric_format)
-    if len(rows) == 0:
-        return stages_list, 0
-
-    # Obtain the second part of the output
-    total_drop_due_to_issues = get_issues(stages, rows,
-                                          first_stage=1,
-                                          last_stage=len(filter_d.events),
-                                          drop_only=True)
-    return stages_list, total_drop_due_to_issues
-
-
 def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_stage=None, last_stage=None):
     output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
     stages = filter_d.events
@@ -1,6 +1,14 @@
+import logging
+from typing import List
+
+from psycopg2.extras import RealDictRow
+
+import schemas
 from chalicelib.utils import ch_client
 from chalicelib.utils import exp_ch_helper
-from .significance import *
+from chalicelib.utils import helper
+from chalicelib.utils import sql_helper as sh
+from chalicelib.core import events

 logger = logging.getLogger(__name__)
@@ -11,9 +19,9 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
     filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
     platform = project.platform
     constraints = ["e.project_id = %(project_id)s",
-                   "e.datetime >= toDateTime(%(startTimestamp)s/1000)",
-                   "e.datetime <= toDateTime(%(endTimestamp)s/1000)",
-                   "e.event_type IN %(eventTypes)s"]
+                   "e.created_at >= toDateTime(%(startTimestamp)s/1000)",
+                   "e.created_at <= toDateTime(%(endTimestamp)s/1000)",
+                   "e.`$event_name` IN %(eventTypes)s"]

     full_args = {"project_id": project.project_id, "startTimestamp": filter_d.startTimestamp,
                  "endTimestamp": filter_d.endTimestamp}
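The funnel constraints now filter the unified events table on `created_at` and `$event_name`, with millisecond timestamps converted in SQL via `toDateTime(%(x)s/1000)`. A hedged sketch of how the constraint strings and their bound parameters line up:

```python
# Hedged sketch: the dict keys must match the %(name)s placeholders above.
def funnel_constraints(project_id: int, start_ms: int, end_ms: int):
    constraints = ["e.project_id = %(project_id)s",
                   "e.created_at >= toDateTime(%(startTimestamp)s/1000)",
                   "e.created_at <= toDateTime(%(endTimestamp)s/1000)",
                   "e.`$event_name` IN %(eventTypes)s"]
    args = {"project_id": project_id,
            "startTimestamp": start_ms,  # epoch milliseconds, divided to seconds in SQL
            "endTimestamp": end_ms}
    return constraints, args
```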
@@ -149,18 +157,25 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
         if next_event_type not in event_types:
             event_types.append(next_event_type)
         full_args[f"event_type_{i}"] = next_event_type
-        n_stages_query.append(f"event_type=%(event_type_{i})s")
+        n_stages_query.append(f"`$event_name`=%(event_type_{i})s")
         if is_not:
             n_stages_query_not.append(n_stages_query[-1] + " AND " +
-                                      (sh.multi_conditions(f' {next_col_name} {op} %({e_k})s', s.value,
-                                                           is_not=is_not, value_key=e_k)
-                                       if not specific_condition else specific_condition))
+                                      (sh.multi_conditions(
+                                          f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
+                                          s.value,
+                                          is_not=is_not,
+                                          value_key=e_k
+                                      ) if not specific_condition else specific_condition))
         elif not is_any:
-            n_stages_query[-1] += " AND " + (sh.multi_conditions(f' {next_col_name} {op} %({e_k})s', s.value,
-                                                                 is_not=is_not, value_key=e_k)
-                                             if not specific_condition else specific_condition)
+            n_stages_query[-1] += " AND " + (
+                sh.multi_conditions(
+                    f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
+                    s.value,
+                    is_not=is_not,
+                    value_key=e_k
+                ) if not specific_condition else specific_condition)

-    full_args = {"eventTypes": tuple(event_types), **full_args, **values}
+    full_args = {"eventTypes": event_types, **full_args, **values}
     n_stages = len(n_stages_query)
     if n_stages == 0:
         return []
@@ -180,8 +195,8 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas

     if len(n_stages_query_not) > 0:
         value_conditions_not_base = ["project_id = %(project_id)s",
-                                     "datetime >= toDateTime(%(startTimestamp)s/1000)",
-                                     "datetime <= toDateTime(%(endTimestamp)s/1000)"]
+                                     "created_at >= toDateTime(%(startTimestamp)s/1000)",
+                                     "created_at <= toDateTime(%(endTimestamp)s/1000)"]
         _value_conditions_not = []
         value_conditions_not = []
         for c in n_stages_query_not:
@@ -202,7 +217,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
     sequences = []
     projections = []
     for i, s in enumerate(n_stages_query):
-        projections.append(f"SUM(T{i + 1}) AS stage{i + 1}")
+        projections.append(f"coalesce(SUM(T{i + 1}),0) AS stage{i + 1}")
         if i == 0:
             sequences.append(f"anyIf(1,{s}) AS T1")
         else:
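The projection change above wraps each stage sum in `coalesce(..., 0)` so an empty funnel stage comes back as `0` rather than a NULL the chart layer would have to special-case. The generated projection strings look like this (hedged illustration):

```python
# Hedged illustration of the per-stage projections produced by the loop above.
projections = [f"coalesce(SUM(T{i + 1}),0) AS stage{i + 1}" for i in range(3)]
print(projections)
# ['coalesce(SUM(T1),0) AS stage1', 'coalesce(SUM(T2),0) AS stage2', 'coalesce(SUM(T3),0) AS stage3']
```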
@@ -213,23 +228,22 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
             pattern += f"(?{j + 1})"
             conditions.append(n_stages_query[j])
             j += 1
-        sequences.append(f"sequenceMatch('{pattern}')(e.datetime, {','.join(conditions)}) AS T{i + 1}")
+        sequences.append(f"sequenceMatch('{pattern}')(toDateTime(e.created_at), {','.join(conditions)}) AS T{i + 1}")

     n_stages_query = f"""
     SELECT {",".join(projections)}
     FROM (SELECT {",".join(sequences)}
           FROM {MAIN_EVENTS_TABLE} AS e {extra_from}
           WHERE {" AND ".join(constraints)}
-          GROUP BY {group_by}) AS raw;
-    """
+          GROUP BY {group_by}) AS raw;"""

     with ch_client.ClickHouseClient() as cur:
-        query = cur.format(n_stages_query, full_args)
+        query = cur.format(query=n_stages_query, parameters=full_args)
         logger.debug("---------------------------------------------------")
         logger.debug(query)
         logger.debug("---------------------------------------------------")
         try:
-            row = cur.execute(query)
+            row = cur.execute(query=query)
         except Exception as err:
             logger.warning("--------- SIMPLE FUNNEL SEARCH QUERY EXCEPTION CH-----------")
             logger.warning(query)
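Each stage is checked with ClickHouse's `sequenceMatch`, whose pattern `(?1)(?2)…(?i)` refers to the per-stage conditions passed after the timestamp expression; the hunk above only swaps `e.datetime` for `toDateTime(e.created_at)`. A hedged sketch of how the pattern and condition list are assembled:

```python
# Hedged sketch of the aggregate string built per stage above.
def stage_sequence(stage_conditions: list[str], i: int) -> str:
    pattern = "".join(f"(?{j + 1})" for j in range(i + 1))
    conditions = ",".join(stage_conditions[:i + 1])
    return f"sequenceMatch('{pattern}')(toDateTime(e.created_at), {conditions}) AS T{i + 1}"

print(stage_sequence(["`$event_name`=%(event_type_0)s",
                      "`$event_name`=%(event_type_1)s"], 1))
# sequenceMatch('(?1)(?2)')(toDateTime(e.created_at), ...) AS T2
```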
10  api/chalicelib/core/metrics/product_analytics/__init__.py  Normal file
@@ -0,0 +1,10 @@
+import logging
+
+from decouple import config
+
+logger = logging.getLogger(__name__)
+if config("EXP_METRICS", cast=bool, default=False):
+    logger.info(">>> Using experimental product-analytics")
+    from .product_analytics_ch import *
+else:
+    from .product_analytics import *
@ -12,42 +12,75 @@ logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
def __transform_journey(rows, reverse_path=False):
|
def __transform_journey(rows, reverse_path=False):
|
||||||
total_100p = 0
|
total_100p = 0
|
||||||
number_of_step1 = 0
|
|
||||||
for r in rows:
|
for r in rows:
|
||||||
if r["event_number_in_session"] > 1:
|
if r["event_number_in_session"] > 1:
|
||||||
break
|
break
|
||||||
number_of_step1 += 1
|
|
||||||
total_100p += r["sessions_count"]
|
total_100p += r["sessions_count"]
|
||||||
# for i in range(number_of_step1):
|
|
||||||
# rows[i]["value"] = 100 / number_of_step1
|
|
||||||
|
|
||||||
# for i in range(number_of_step1, len(rows)):
|
|
||||||
for i in range(len(rows)):
|
|
||||||
rows[i]["value"] = rows[i]["sessions_count"] * 100 / total_100p
|
|
||||||
|
|
||||||
nodes = []
|
nodes = []
|
||||||
nodes_values = []
|
nodes_values = []
|
||||||
links = []
|
links = []
|
||||||
|
drops = []
|
||||||
|
max_depth = 0
|
||||||
for r in rows:
|
for r in rows:
|
||||||
source = f"{r['event_number_in_session']}_{r['event_type']}_{r['e_value']}"
|
r["value"] = r["sessions_count"] * 100 / total_100p
|
||||||
|
source = f"{r['event_number_in_session'] - 1}_{r['event_type']}_{r['e_value']}"
|
||||||
if source not in nodes:
|
if source not in nodes:
|
||||||
nodes.append(source)
|
nodes.append(source)
|
||||||
nodes_values.append({"name": r['e_value'], "eventType": r['event_type'],
|
nodes_values.append({"depth": r['event_number_in_session'] - 1,
|
||||||
"avgTimeFromPrevious": 0, "sessionsCount": 0})
|
"name": r['e_value'],
|
||||||
if r['next_value']:
|
"eventType": r['event_type'],
|
||||||
target = f"{r['event_number_in_session'] + 1}_{r['next_type']}_{r['next_value']}"
|
"id": len(nodes_values)})
|
||||||
if target not in nodes:
|
|
||||||
nodes.append(target)
|
|
||||||
nodes_values.append({"name": r['next_value'], "eventType": r['next_type'],
|
|
||||||
"avgTimeFromPrevious": 0, "sessionsCount": 0})
|
|
||||||
|
|
||||||
|
target = f"{r['event_number_in_session']}_{r['next_type']}_{r['next_value']}"
|
||||||
|
if target not in nodes:
|
||||||
|
nodes.append(target)
|
||||||
|
nodes_values.append({"depth": r['event_number_in_session'],
|
||||||
|
"name": r['next_value'],
|
||||||
|
"eventType": r['next_type'],
|
||||||
|
"id": len(nodes_values)})
|
||||||
|
|
||||||
|
sr_idx = nodes.index(source)
|
||||||
|
tg_idx = nodes.index(target)
|
||||||
|
|
||||||
|
link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"], "value": r["value"]}
|
||||||
|
if not reverse_path:
|
||||||
|
link["source"] = sr_idx
|
||||||
|
link["target"] = tg_idx
|
||||||
|
else:
|
||||||
|
link["source"] = tg_idx
|
||||||
|
link["target"] = sr_idx
|
||||||
|
links.append(link)
|
||||||
|
|
||||||
|
max_depth = r['event_number_in_session']
|
||||||
|
if r["next_type"] == "DROP":
|
||||||
|
for d in drops:
|
||||||
|
if d["depth"] == r['event_number_in_session']:
|
||||||
|
d["sessions_count"] += r["sessions_count"]
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
drops.append({"depth": r['event_number_in_session'], "sessions_count": r["sessions_count"]})
|
||||||
|
|
||||||
|
for i in range(len(drops)):
|
||||||
|
|
||||||
|
if drops[i]["depth"] < max_depth:
|
||||||
|
source = f"{drops[i]['depth']}_DROP_None"
|
||||||
|
target = f"{drops[i]['depth'] + 1}_DROP_None"
|
||||||
sr_idx = nodes.index(source)
|
sr_idx = nodes.index(source)
|
||||||
tg_idx = nodes.index(target)
|
|
||||||
if r["avg_time_from_previous"] is not None:
|
if i < len(drops) - 1 and drops[i]["depth"] + 1 == drops[i + 1]["depth"]:
|
||||||
nodes_values[tg_idx]["avgTimeFromPrevious"] += r["avg_time_from_previous"] * r["sessions_count"]
|
tg_idx = nodes.index(target)
|
||||||
nodes_values[tg_idx]["sessionsCount"] += r["sessions_count"]
|
else:
|
||||||
link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"],
|
nodes.append(target)
|
||||||
"value": r["value"], "avgTimeFromPrevious": r["avg_time_from_previous"]}
|
nodes_values.append({"depth": drops[i]["depth"] + 1,
|
||||||
|
"name": None,
|
||||||
|
"eventType": "DROP",
|
||||||
|
"id": len(nodes_values)})
|
||||||
|
tg_idx = len(nodes) - 1
|
||||||
|
|
||||||
|
link = {"eventType": "DROP",
|
||||||
|
"sessionsCount": drops[i]["sessions_count"],
|
||||||
|
"value": drops[i]["sessions_count"] * 100 / total_100p}
|
||||||
if not reverse_path:
|
if not reverse_path:
|
||||||
link["source"] = sr_idx
|
link["source"] = sr_idx
|
||||||
link["target"] = tg_idx
|
link["target"] = tg_idx
|
||||||
|
|
@ -55,13 +88,10 @@ def __transform_journey(rows, reverse_path=False):
|
||||||
link["source"] = tg_idx
|
link["source"] = tg_idx
|
||||||
link["target"] = sr_idx
|
link["target"] = sr_idx
|
||||||
links.append(link)
|
links.append(link)
|
||||||
for n in nodes_values:
|
|
||||||
if n["sessionsCount"] > 0:
|
|
||||||
n["avgTimeFromPrevious"] = n["avgTimeFromPrevious"] / n["sessionsCount"]
|
|
||||||
else:
|
|
||||||
n["avgTimeFromPrevious"] = None
|
|
||||||
n.pop("sessionsCount")
|
|
||||||
|
|
||||||
|
if reverse_path:
|
||||||
|
for n in nodes_values:
|
||||||
|
n["depth"] = max_depth - n["depth"]
|
||||||
return {"nodes": nodes_values,
|
return {"nodes": nodes_values,
|
||||||
"links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
|
"links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
|
||||||
|
|
||||||
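The reworked `__transform_journey` keys every node by `depth_eventType_value`, assigns incremental ids, and emits links whose value is a percentage of the step-1 session total (the synthetic DROP chaining and reverse-path mirroring are handled separately in the diff). A hedged, minimal sketch of that node/link bookkeeping; the row fields mirror the query output used above, everything else is illustrative:

```python
# Hedged sketch of the node/link bookkeeping; DROP chaining and reverse_path
# handling from the diff are intentionally omitted.
def build_graph(rows, total_step1_sessions):
    nodes, node_values, links = [], [], []

    def node_index(depth, event_type, value):
        key = f"{depth}_{event_type}_{value}"
        if key not in nodes:
            nodes.append(key)
            node_values.append({"depth": depth, "name": value,
                                "eventType": event_type, "id": len(node_values)})
        return nodes.index(key)

    for r in rows:
        src = node_index(r["event_number_in_session"] - 1, r["event_type"], r["e_value"])
        dst = node_index(r["event_number_in_session"], r["next_type"], r["next_value"])
        links.append({"eventType": r["event_type"],
                      "sessionsCount": r["sessions_count"],
                      "value": r["sessions_count"] * 100 / total_step1_sessions,
                      "source": src, "target": dst})
    return {"nodes": node_values, "links": links}
```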
|
|
@ -403,7 +433,9 @@ WITH sub_sessions AS (SELECT session_id {sub_sessions_extra_projection}
|
||||||
{"UNION ALL".join(projection_query)};"""
|
{"UNION ALL".join(projection_query)};"""
|
||||||
params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
|
params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
|
||||||
"endTimestamp": data.endTimestamp, "density": data.density,
|
"endTimestamp": data.endTimestamp, "density": data.density,
|
||||||
"eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
|
# This is ignored because UI will take care of it
|
||||||
|
# "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
|
||||||
|
"eventThresholdNumberInGroup": 8,
|
||||||
**extra_values}
|
**extra_values}
|
||||||
query = cur.mogrify(pg_query, params)
|
query = cur.mogrify(pg_query, params)
|
||||||
_now = time()
|
_now = time()
|
||||||
|
|
@ -1,110 +1,135 @@
|
||||||
from typing import List
|
import logging
|
||||||
|
|
||||||
import schemas
|
|
||||||
from chalicelib.core.metrics import __get_basic_constraints, __get_meta_constraint
|
|
||||||
from chalicelib.core.metrics import __get_constraint_values, __complete_missing_steps
|
|
||||||
from chalicelib.utils import ch_client, exp_ch_helper
|
|
||||||
from chalicelib.utils import helper, dev
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
|
||||||
from chalicelib.utils import sql_helper as sh
|
|
||||||
from chalicelib.core import metadata
|
|
||||||
from time import time
|
from time import time
|
||||||
|
|
||||||
import logging
|
import schemas
|
||||||
|
from chalicelib.core import metadata
|
||||||
|
from .product_analytics import __transform_journey
|
||||||
|
from chalicelib.utils import ch_client, exp_ch_helper
|
||||||
|
from chalicelib.utils import helper
|
||||||
|
from chalicelib.utils import sql_helper as sh
|
||||||
|
from chalicelib.utils.TimeUTC import TimeUTC
|
||||||
|
from chalicelib.utils.metrics_helper import get_step_size
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def __transform_journey(rows, reverse_path=False):
|
|
||||||
total_100p = 0
|
|
||||||
number_of_step1 = 0
|
|
||||||
for r in rows:
|
|
||||||
if r["event_number_in_session"] > 1:
|
|
||||||
break
|
|
||||||
number_of_step1 += 1
|
|
||||||
total_100p += r["sessions_count"]
|
|
||||||
# for i in range(number_of_step1):
|
|
||||||
# rows[i]["value"] = 100 / number_of_step1
|
|
||||||
|
|
||||||
# for i in range(number_of_step1, len(rows)):
|
|
||||||
for i in range(len(rows)):
|
|
||||||
rows[i]["value"] = rows[i]["sessions_count"] * 100 / total_100p
|
|
||||||
|
|
||||||
nodes = []
|
|
||||||
nodes_values = []
|
|
||||||
links = []
|
|
||||||
for r in rows:
|
|
||||||
source = f"{r['event_number_in_session']}_{r['event_type']}_{r['e_value']}"
|
|
||||||
if source not in nodes:
|
|
||||||
nodes.append(source)
|
|
||||||
nodes_values.append({"name": r['e_value'], "eventType": r['event_type'],
|
|
||||||
"avgTimeFromPrevious": 0, "sessionsCount": 0})
|
|
||||||
if r['next_value']:
|
|
||||||
target = f"{r['event_number_in_session'] + 1}_{r['next_type']}_{r['next_value']}"
|
|
||||||
if target not in nodes:
|
|
||||||
nodes.append(target)
|
|
||||||
nodes_values.append({"name": r['next_value'], "eventType": r['next_type'],
|
|
||||||
"avgTimeFromPrevious": 0, "sessionsCount": 0})
|
|
||||||
|
|
||||||
sr_idx = nodes.index(source)
|
|
||||||
tg_idx = nodes.index(target)
|
|
||||||
if r["avg_time_from_previous"] is not None:
|
|
||||||
nodes_values[tg_idx]["avgTimeFromPrevious"] += r["avg_time_from_previous"] * r["sessions_count"]
|
|
||||||
nodes_values[tg_idx]["sessionsCount"] += r["sessions_count"]
|
|
||||||
link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"],
|
|
||||||
"value": r["value"], "avgTimeFromPrevious": r["avg_time_from_previous"]}
|
|
||||||
if not reverse_path:
|
|
||||||
link["source"] = sr_idx
|
|
||||||
link["target"] = tg_idx
|
|
||||||
else:
|
|
||||||
link["source"] = tg_idx
|
|
||||||
link["target"] = sr_idx
|
|
||||||
links.append(link)
|
|
||||||
for n in nodes_values:
|
|
||||||
if n["sessionsCount"] > 0:
|
|
||||||
n["avgTimeFromPrevious"] = n["avgTimeFromPrevious"] / n["sessionsCount"]
|
|
||||||
else:
|
|
||||||
n["avgTimeFromPrevious"] = None
|
|
||||||
n.pop("sessionsCount")
|
|
||||||
|
|
||||||
return {"nodes": nodes_values,
|
|
||||||
"links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
|
|
||||||
|
|
||||||
|
|
||||||
JOURNEY_TYPES = {
|
JOURNEY_TYPES = {
|
||||||
schemas.ProductAnalyticsSelectedEventType.LOCATION: {"eventType": "LOCATION", "column": "url_path"},
|
schemas.ProductAnalyticsSelectedEventType.LOCATION: {"eventType": "LOCATION", "column": "`$properties`.url_path"},
|
||||||
schemas.ProductAnalyticsSelectedEventType.CLICK: {"eventType": "CLICK", "column": "label"},
|
schemas.ProductAnalyticsSelectedEventType.CLICK: {"eventType": "CLICK", "column": "`$properties`.label"},
|
||||||
schemas.ProductAnalyticsSelectedEventType.INPUT: {"eventType": "INPUT", "column": "label"},
|
schemas.ProductAnalyticsSelectedEventType.INPUT: {"eventType": "INPUT", "column": "`$properties`.label"},
|
||||||
schemas.ProductAnalyticsSelectedEventType.CUSTOM_EVENT: {"eventType": "CUSTOM", "column": "name"}
|
schemas.ProductAnalyticsSelectedEventType.CUSTOM_EVENT: {"eventType": "CUSTOM", "column": "`$properties`.name"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
# Q6: use events as a sub_query to support filter of materialized columns when doing a join
|
def __get_basic_constraints_events(table_name=None, identifier="project_id"):
|
||||||
# query: Q5, the result is correct,
|
if table_name:
|
||||||
|
table_name += "."
|
||||||
|
else:
|
||||||
|
table_name = ""
|
||||||
|
ch_sub_query = [f"{table_name}{identifier} =toUInt16(%({identifier})s)"]
|
||||||
|
ch_sub_query.append(f"{table_name}created_at >= toDateTime(%(startTimestamp)s/1000)")
|
||||||
|
ch_sub_query.append(f"{table_name}created_at < toDateTime(%(endTimestamp)s/1000)")
|
||||||
|
return ch_sub_query
|
||||||
|
|
||||||
|
|
||||||
|
def __frange(start, stop, step):
|
||||||
|
result = []
|
||||||
|
i = start
|
||||||
|
while i < stop:
|
||||||
|
result.append(i)
|
||||||
|
i += step
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def __add_missing_keys(original, complete):
|
||||||
|
for missing in [key for key in complete.keys() if key not in original.keys()]:
|
||||||
|
original[missing] = complete[missing]
|
||||||
|
return original
|
||||||
|
|
||||||
|
|
||||||
|
def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_key="timestamp", time_coefficient=1000):
|
||||||
|
if len(rows) == density:
|
||||||
|
return rows
|
||||||
|
step = get_step_size(start_time, end_time, density, decimal=True)
|
||||||
|
optimal = [(int(i * time_coefficient), int((i + step) * time_coefficient)) for i in
|
||||||
|
__frange(start_time // time_coefficient, end_time // time_coefficient, step)]
|
||||||
|
result = []
|
||||||
|
r = 0
|
||||||
|
o = 0
|
||||||
|
for i in range(density):
|
||||||
|
neutral_clone = dict(neutral)
|
||||||
|
for k in neutral_clone.keys():
|
||||||
|
if callable(neutral_clone[k]):
|
||||||
|
neutral_clone[k] = neutral_clone[k]()
|
||||||
|
if r < len(rows) and len(result) + len(rows) - r == density:
|
||||||
|
result += rows[r:]
|
||||||
|
break
|
||||||
|
if r < len(rows) and o < len(optimal) and rows[r][time_key] < optimal[o][0]:
|
||||||
|
# complete missing keys in original object
|
||||||
|
rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
|
||||||
|
result.append(rows[r])
|
||||||
|
r += 1
|
||||||
|
elif r < len(rows) and o < len(optimal) and optimal[o][0] <= rows[r][time_key] < optimal[o][1]:
|
||||||
|
# complete missing keys in original object
|
||||||
|
rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
|
||||||
|
result.append(rows[r])
|
||||||
|
r += 1
|
||||||
|
o += 1
|
||||||
|
else:
|
||||||
|
neutral_clone[time_key] = optimal[o][0]
|
||||||
|
result.append(neutral_clone)
|
||||||
|
o += 1
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
# startPoints are computed before ranked_events to reduce the number of window functions over rows
|
# startPoints are computed before ranked_events to reduce the number of window functions over rows
|
||||||
# replaced time_to_target by time_from_previous
|
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
|
||||||
# compute avg_time_from_previous at the same level as sessions_count
|
|
||||||
# sort by top 5 according to sessions_count at the CTE level
|
|
||||||
# final part project data without grouping
|
|
||||||
# if start-point is selected, the selected event is ranked n°1
|
# if start-point is selected, the selected event is ranked n°1
|
||||||
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||||
|
if not data.hide_excess:
|
||||||
|
data.hide_excess = True
|
||||||
|
data.rows = 50
|
||||||
sub_events = []
|
sub_events = []
|
||||||
start_points_conditions = []
|
start_points_conditions = []
|
||||||
step_0_conditions = []
|
step_0_conditions = []
|
||||||
|
step_1_post_conditions = ["event_number_in_session <= %(density)s"]
|
||||||
|
q2_extra_col = None
|
||||||
|
q2_extra_condition = None
|
||||||
if len(data.metric_value) == 0:
|
if len(data.metric_value) == 0:
|
||||||
data.metric_value.append(schemas.ProductAnalyticsSelectedEventType.LOCATION)
|
data.metric_value.append(schemas.ProductAnalyticsSelectedEventType.LOCATION)
|
||||||
sub_events.append({"column": JOURNEY_TYPES[schemas.ProductAnalyticsSelectedEventType.LOCATION]["column"],
|
sub_events.append({"column": JOURNEY_TYPES[schemas.ProductAnalyticsSelectedEventType.LOCATION]["column"],
|
||||||
"eventType": schemas.ProductAnalyticsSelectedEventType.LOCATION.value})
|
"eventType": schemas.ProductAnalyticsSelectedEventType.LOCATION.value})
|
||||||
else:
|
else:
|
||||||
|
if len(data.start_point) > 0:
|
||||||
|
extra_metric_values = []
|
||||||
|
for s in data.start_point:
|
||||||
|
if s.type not in data.metric_value:
|
||||||
|
sub_events.append({"column": JOURNEY_TYPES[s.type]["column"],
|
||||||
|
"eventType": JOURNEY_TYPES[s.type]["eventType"]})
|
||||||
|
step_1_post_conditions.append(
|
||||||
|
f"(`$event_name`='{JOURNEY_TYPES[s.type]['eventType']}' AND event_number_in_session = 1 \
|
||||||
|
OR `$event_name`!='{JOURNEY_TYPES[s.type]['eventType']}' AND event_number_in_session > 1)")
|
||||||
|
extra_metric_values.append(s.type)
|
||||||
|
if not q2_extra_col:
|
||||||
|
# This is used in case start event has different type of the visible event,
|
||||||
|
# because it causes intermediary events to be removed, so you find a jump from step-0 to step-3
|
||||||
|
# because step-2 is not of a visible event
|
||||||
|
q2_extra_col = """,leadInFrame(toNullable(event_number_in_session))
|
||||||
|
OVER (PARTITION BY session_id ORDER BY created_at %s
|
||||||
|
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_event_number_in_session"""
|
||||||
|
q2_extra_condition = """WHERE event_number_in_session + 1 = next_event_number_in_session
|
||||||
|
OR isNull(next_event_number_in_session);"""
|
||||||
|
data.metric_value += extra_metric_values
|
||||||
|
|
||||||
for v in data.metric_value:
|
for v in data.metric_value:
|
||||||
if JOURNEY_TYPES.get(v):
|
if JOURNEY_TYPES.get(v):
|
||||||
sub_events.append({"column": JOURNEY_TYPES[v]["column"],
|
sub_events.append({"column": JOURNEY_TYPES[v]["column"],
|
||||||
"eventType": JOURNEY_TYPES[v]["eventType"]})
|
"eventType": JOURNEY_TYPES[v]["eventType"]})
|
||||||
|
|
||||||
if len(sub_events) == 1:
|
if len(sub_events) == 1:
|
||||||
main_column = sub_events[0]['column']
|
main_column = sub_events[0]['column']
|
||||||
else:
|
else:
|
||||||
main_column = f"multiIf(%s,%s)" % (
|
main_column = f"multiIf(%s,%s)" % (
|
||||||
','.join([f"event_type='{s['eventType']}',{s['column']}" for s in sub_events[:-1]]),
|
','.join([f"`$event_name`='{s['eventType']}',{s['column']}" for s in sub_events[:-1]]),
|
||||||
sub_events[-1]["column"])
|
sub_events[-1]["column"])
|
||||||
extra_values = {}
|
extra_values = {}
|
||||||
reverse = data.start_type == "end"
|
reverse = data.start_type == "end"
|
||||||
|
|
@ -117,19 +142,19 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||||
event_type = JOURNEY_TYPES[sf.type]['eventType']
|
event_type = JOURNEY_TYPES[sf.type]['eventType']
|
||||||
extra_values = {**extra_values, **sh.multi_values(sf.value, value_key=f_k),
|
extra_values = {**extra_values, **sh.multi_values(sf.value, value_key=f_k),
|
||||||
f"start_event_type_{i}": event_type}
|
f"start_event_type_{i}": event_type}
|
||||||
start_points_conditions.append(f"(event_type=%(start_event_type_{i})s AND " +
|
start_points_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||||
sh.multi_conditions(f'{event_column} {op} %({f_k})s', sf.value, is_not=is_not,
|
sh.multi_conditions(f'{event_column} {op} %({f_k})s', sf.value, is_not=is_not,
|
||||||
value_key=f_k)
|
value_key=f_k)
|
||||||
+ ")")
|
+ ")")
|
||||||
step_0_conditions.append(f"(event_type=%(start_event_type_{i})s AND " +
|
step_0_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||||
sh.multi_conditions(f'e_value {op} %({f_k})s', sf.value, is_not=is_not,
|
sh.multi_conditions(f'e_value {op} %({f_k})s', sf.value, is_not=is_not,
|
||||||
value_key=f_k)
|
value_key=f_k)
|
||||||
+ ")")
|
+ ")")
|
||||||
if len(start_points_conditions) > 0:
|
if len(start_points_conditions) > 0:
|
||||||
start_points_conditions = ["(" + " OR ".join(start_points_conditions) + ")",
|
start_points_conditions = ["(" + " OR ".join(start_points_conditions) + ")",
|
||||||
"events.project_id = toUInt16(%(project_id)s)",
|
"events.project_id = toUInt16(%(project_id)s)",
|
||||||
"events.datetime >= toDateTime(%(startTimestamp)s / 1000)",
|
"events.created_at >= toDateTime(%(startTimestamp)s / 1000)",
|
||||||
"events.datetime < toDateTime(%(endTimestamp)s / 1000)"]
|
"events.created_at < toDateTime(%(endTimestamp)s / 1000)"]
|
||||||
step_0_conditions = ["(" + " OR ".join(step_0_conditions) + ")",
|
step_0_conditions = ["(" + " OR ".join(step_0_conditions) + ")",
|
||||||
"pre_ranked_events.event_number_in_session = 1"]
|
"pre_ranked_events.event_number_in_session = 1"]
|
||||||
|
|
||||||
|
|
@ -318,10 +343,11 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||||
else:
|
else:
|
||||||
path_direction = ""
|
path_direction = ""
|
||||||
|
|
||||||
ch_sub_query = __get_basic_constraints(table_name="events")
|
# ch_sub_query = __get_basic_constraints(table_name="events")
|
||||||
|
ch_sub_query = __get_basic_constraints_events(table_name="events")
|
||||||
selected_event_type_sub_query = []
|
selected_event_type_sub_query = []
|
||||||
for s in data.metric_value:
|
for s in data.metric_value:
|
||||||
selected_event_type_sub_query.append(f"events.event_type = '{JOURNEY_TYPES[s]['eventType']}'")
|
selected_event_type_sub_query.append(f"events.`$event_name` = '{JOURNEY_TYPES[s]['eventType']}'")
|
||||||
if s in exclusions:
|
if s in exclusions:
|
||||||
selected_event_type_sub_query[-1] += " AND (" + " AND ".join(exclusions[s]) + ")"
|
selected_event_type_sub_query[-1] += " AND (" + " AND ".join(exclusions[s]) + ")"
|
||||||
selected_event_type_sub_query = " OR ".join(selected_event_type_sub_query)
|
selected_event_type_sub_query = " OR ".join(selected_event_type_sub_query)
|
||||||
|
|
@ -344,14 +370,14 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||||
|
|
||||||
if len(start_points_conditions) == 0:
|
if len(start_points_conditions) == 0:
|
||||||
step_0_subquery = """SELECT DISTINCT session_id
|
step_0_subquery = """SELECT DISTINCT session_id
|
||||||
FROM (SELECT event_type, e_value
|
FROM (SELECT `$event_name`, e_value
|
||||||
FROM pre_ranked_events
|
FROM pre_ranked_events
|
||||||
WHERE event_number_in_session = 1
|
WHERE event_number_in_session = 1
|
||||||
GROUP BY event_type, e_value
|
GROUP BY `$event_name`, e_value
|
||||||
ORDER BY count(1) DESC
|
ORDER BY count(1) DESC
|
||||||
LIMIT 1) AS top_start_events
|
LIMIT 1) AS top_start_events
|
||||||
INNER JOIN pre_ranked_events
|
INNER JOIN pre_ranked_events
|
||||||
ON (top_start_events.event_type = pre_ranked_events.event_type AND
|
ON (top_start_events.`$event_name` = pre_ranked_events.`$event_name` AND
|
||||||
top_start_events.e_value = pre_ranked_events.e_value)
|
top_start_events.e_value = pre_ranked_events.e_value)
|
||||||
WHERE pre_ranked_events.event_number_in_session = 1"""
|
WHERE pre_ranked_events.event_number_in_session = 1"""
|
||||||
initial_event_cte = ""
|
initial_event_cte = ""
|
||||||
|
|
@@ -360,65 +386,85 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
 FROM pre_ranked_events
 WHERE {" AND ".join(step_0_conditions)}"""
 initial_event_cte = f"""\
-initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
+initial_event AS (SELECT events.session_id, MIN(created_at) AS start_event_timestamp
 FROM {main_events_table} {"INNER JOIN sub_sessions USING (session_id)" if len(sessions_conditions) > 0 else ""}
 WHERE {" AND ".join(start_points_conditions)}
 GROUP BY 1),"""
-ch_sub_query.append("events.datetime>=initial_event.start_event_timestamp")
+ch_sub_query.append(f"events.created_at{'<=' if reverse else '>='}initial_event.start_event_timestamp")
 main_events_table += " INNER JOIN initial_event ON (events.session_id = initial_event.session_id)"
 sessions_conditions = []

-steps_query = ["""n1 AS (SELECT event_number_in_session,
-event_type,
-e_value,
-next_type,
-next_value,
-AVG(time_from_previous) AS avg_time_from_previous,
-COUNT(1) AS sessions_count
-FROM ranked_events
-WHERE event_number_in_session = 1
-AND isNotNull(next_value)
-GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
-ORDER BY sessions_count DESC
-LIMIT %(eventThresholdNumberInGroup)s)"""]
-projection_query = ["""SELECT event_number_in_session,
-event_type,
-e_value,
-next_type,
-next_value,
-sessions_count,
-avg_time_from_previous
-FROM n1"""]
-for i in range(2, data.density + 1):
-steps_query.append(f"""n{i} AS (SELECT *
-FROM (SELECT re.event_number_in_session AS event_number_in_session,
-re.event_type AS event_type,
-re.e_value AS e_value,
-re.next_type AS next_type,
-re.next_value AS next_value,
-AVG(re.time_from_previous) AS avg_time_from_previous,
-COUNT(1) AS sessions_count
-FROM n{i - 1} INNER JOIN ranked_events AS re
-ON (n{i - 1}.next_value = re.e_value AND n{i - 1}.next_type = re.event_type)
-WHERE re.event_number_in_session = {i}
-GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type, re.next_value) AS sub_level
-ORDER BY sessions_count DESC
-LIMIT %(eventThresholdNumberInGroup)s)""")
-projection_query.append(f"""SELECT event_number_in_session,
-event_type,
+steps_query = []
+# This is used if data.hideExcess is True
+projection_query = []
+drop_query = []
+top_query = []
+top_with_next_query = []
+other_query = []
+for i in range(1, data.density + (1 if data.hide_excess else 0)):
+steps_query.append(f"""n{i} AS (SELECT event_number_in_session,
+`$event_name`,
+e_value,
+next_type,
+next_value,
+COUNT(1) AS sessions_count
+FROM ranked_events
+WHERE event_number_in_session = {i}
+GROUP BY event_number_in_session, `$event_name`, e_value, next_type, next_value
+ORDER BY sessions_count DESC)""")
+if not data.hide_excess:
+projection_query.append(f"""\
+SELECT event_number_in_session,
+`$event_name`,
 e_value,
 next_type,
 next_value,
-sessions_count,
-avg_time_from_previous
-FROM n{i}""")
+sessions_count
+FROM n{i}
+WHERE isNotNull(next_type)""")
+else:
+top_query.append(f"""\
+SELECT event_number_in_session,
+`$event_name`,
+e_value,
+SUM(n{i}.sessions_count) AS sessions_count
+FROM n{i}
+GROUP BY event_number_in_session, `$event_name`, e_value
+ORDER BY sessions_count DESC
+LIMIT %(visibleRows)s""")

+if i < data.density:
+drop_query.append(f"""SELECT event_number_in_session,
+`$event_name`,
+e_value,
+'DROP' AS next_type,
+NULL AS next_value,
+sessions_count
+FROM n{i}
+WHERE isNull(n{i}.next_type)""")
+if data.hide_excess:
+top_with_next_query.append(f"""\
+SELECT n{i}.*
+FROM n{i}
+INNER JOIN top_n
+ON (n{i}.event_number_in_session = top_n.event_number_in_session
+AND n{i}.`$event_name` = top_n.`$event_name`
+AND n{i}.e_value = top_n.e_value)""")

+if i > 1 and data.hide_excess:
+other_query.append(f"""SELECT n{i}.*
+FROM n{i}
+WHERE (event_number_in_session, `$event_name`, e_value) NOT IN
+(SELECT event_number_in_session, `$event_name`, e_value
+FROM top_n
+WHERE top_n.event_number_in_session = {i})""")

 with ch_client.ClickHouseClient(database="experimental") as ch:
 time_key = TimeUTC.now()
 _now = time()
 params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
 "endTimestamp": data.endTimestamp, "density": data.density,
-"eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
+"visibleRows": data.rows,
 **extra_values}

 ch_query1 = f"""\

@@ -427,23 +473,24 @@ WITH {initial_sessions_cte}
 {initial_event_cte}
 pre_ranked_events AS (SELECT *
 FROM (SELECT session_id,
-event_type,
-datetime,
-{main_column} AS e_value,
+`$event_name`,
+created_at,
+toString({main_column}) AS e_value,
 row_number() OVER (PARTITION BY session_id
-ORDER BY datetime {path_direction},
-message_id {path_direction} ) AS event_number_in_session
+ORDER BY created_at {path_direction},
+event_id {path_direction} ) AS event_number_in_session
 FROM {main_events_table} {"INNER JOIN sub_sessions ON (sub_sessions.session_id = events.session_id)" if len(sessions_conditions) > 0 else ""}
 WHERE {" AND ".join(ch_sub_query)}
 ) AS full_ranked_events
-WHERE event_number_in_session <= %(density)s)
+WHERE {" AND ".join(step_1_post_conditions)})
 SELECT *
 FROM pre_ranked_events;"""
 logger.debug("---------Q1-----------")
-ch.execute(query=ch_query1, params=params)
+ch_query1 = ch.format(query=ch_query1, parameters=params)
+ch.execute(query=ch_query1)
 if time() - _now > 2:
 logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
-logger.warning(ch.format(ch_query1, params))
+logger.warning(str.encode(ch_query1))
 logger.warning("----------------------")
 _now = time()

@@ -454,38 +501,136 @@ WITH pre_ranked_events AS (SELECT *
 start_points AS ({step_0_subquery}),
 ranked_events AS (SELECT pre_ranked_events.*,
 leadInFrame(e_value)
-OVER (PARTITION BY session_id ORDER BY datetime {path_direction}
+OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
-leadInFrame(toNullable(event_type))
-OVER (PARTITION BY session_id ORDER BY datetime {path_direction}
-ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type,
-abs(lagInFrame(toNullable(datetime))
-OVER (PARTITION BY session_id ORDER BY datetime {path_direction}
-ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
-- pre_ranked_events.datetime) AS time_from_previous
+leadInFrame(toNullable(`$event_name`))
+OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
+ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
+{q2_extra_col % path_direction if q2_extra_col else ""}
 FROM start_points INNER JOIN pre_ranked_events USING (session_id))
 SELECT *
-FROM ranked_events;"""
+FROM ranked_events
+{q2_extra_condition if q2_extra_condition else ""};"""
 logger.debug("---------Q2-----------")
-ch.execute(query=ch_query2, params=params)
+ch_query2 = ch.format(query=ch_query2, parameters=params)
+ch.execute(query=ch_query2)
 if time() - _now > 2:
 logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
-logger.warning(ch.format(ch_query2, params))
+logger.warning(str.encode(ch_query2))
 logger.warning("----------------------")
 _now = time()

+sub_cte = ""
+if data.hide_excess:
+sub_cte = f""",
+top_n AS ({" UNION ALL ".join(top_query)}),
+top_n_with_next AS ({" UNION ALL ".join(top_with_next_query)}),
+others_n AS ({" UNION ALL ".join(other_query)})"""
+projection_query = """\
+-- Top to Top: valid
+SELECT top_n_with_next.*
+FROM top_n_with_next
+INNER JOIN top_n
+ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
+AND top_n_with_next.next_type = top_n.`$event_name`
+AND top_n_with_next.next_value = top_n.e_value)
+UNION ALL
+-- Top to Others: valid
+SELECT top_n_with_next.event_number_in_session,
+top_n_with_next.`$event_name`,
+top_n_with_next.e_value,
+'OTHER' AS next_type,
+NULL AS next_value,
+SUM(top_n_with_next.sessions_count) AS sessions_count
+FROM top_n_with_next
+WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
+(SELECT others_n.event_number_in_session, others_n.`$event_name`, others_n.e_value FROM others_n)
+GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.`$event_name`, top_n_with_next.e_value
+UNION ALL
+-- Top go to Drop: valid
+SELECT drop_n.event_number_in_session,
+drop_n.`$event_name`,
+drop_n.e_value,
+drop_n.next_type,
+drop_n.next_value,
+drop_n.sessions_count
+FROM drop_n
+INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
+AND drop_n.`$event_name` = top_n.`$event_name`
+AND drop_n.e_value = top_n.e_value)
+ORDER BY drop_n.event_number_in_session
+UNION ALL
+-- Others got to Drop: valid
+SELECT others_n.event_number_in_session,
+'OTHER' AS `$event_name`,
+NULL AS e_value,
+'DROP' AS next_type,
+NULL AS next_value,
+SUM(others_n.sessions_count) AS sessions_count
+FROM others_n
+WHERE isNull(others_n.next_type)
+AND others_n.event_number_in_session < 3
+GROUP BY others_n.event_number_in_session, next_type, next_value
+UNION ALL
+-- Others got to Top:valid
+SELECT others_n.event_number_in_session,
+'OTHER' AS `$event_name`,
+NULL AS e_value,
+others_n.next_type,
+others_n.next_value,
+SUM(others_n.sessions_count) AS sessions_count
+FROM others_n
+WHERE isNotNull(others_n.next_type)
+AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
+(SELECT top_n.event_number_in_session, top_n.`$event_name`, top_n.e_value FROM top_n)
+GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
+UNION ALL
+-- Others got to Others
+SELECT others_n.event_number_in_session,
+'OTHER' AS `$event_name`,
+NULL AS e_value,
+'OTHER' AS next_type,
+NULL AS next_value,
+SUM(others_n.sessions_count) AS sessions_count
+FROM others_n
+WHERE isNotNull(others_n.next_type)
+AND others_n.event_number_in_session < %(density)s
+AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
+(SELECT event_number_in_session, `$event_name`, e_value FROM top_n)
+GROUP BY others_n.event_number_in_session"""
+else:
+projection_query.append("""\
+SELECT event_number_in_session,
+`$event_name`,
+e_value,
+next_type,
+next_value,
+sessions_count
+FROM drop_n""")
+projection_query = " UNION ALL ".join(projection_query)

 ch_query3 = f"""\
 WITH ranked_events AS (SELECT *
 FROM ranked_events_{time_key}),
-{",".join(steps_query)}
-SELECT *
-FROM ({" UNION ALL ".join(projection_query)}) AS chart_steps
-ORDER BY event_number_in_session;"""
+{", ".join(steps_query)},
+drop_n AS ({" UNION ALL ".join(drop_query)})
+{sub_cte}
+SELECT event_number_in_session,
+`$event_name` AS event_type,
+e_value,
+next_type,
+next_value,
+sessions_count
+FROM (
+{projection_query}
+) AS chart_steps
+ORDER BY event_number_in_session, sessions_count DESC;"""
 logger.debug("---------Q3-----------")
-rows = ch.execute(query=ch_query3, params=params)
+ch_query3 = ch.format(query=ch_query3, parameters=params)
+rows = ch.execute(query=ch_query3)
 if time() - _now > 2:
 logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
-logger.warning(ch.format(ch_query3, params))
+logger.warning(str.encode(ch_query3))
 logger.warning("----------------------")

 return __transform_journey(rows=rows, reverse_path=reverse)
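The three queries above move to a format-then-execute pattern: the SQL is fully rendered with ch.format() before ch.execute(), so the exact statement that ran can be logged when a query exceeds the slow threshold. A minimal sketch of that pattern, assuming only the ClickHouseClient format/execute signatures already used in this diff (run_logged and slow_threshold_s are hypothetical names introduced here for illustration):

    import logging
    from time import time

    logger = logging.getLogger(__name__)

    def run_logged(ch, query: str, params: dict, label: str, slow_threshold_s: int = 2):
        # Render the parameters into the SQL first, as the diff does with ch.format(...)
        formatted = ch.format(query=query, parameters=params)
        started = time()
        rows = ch.execute(query=formatted)
        if time() - started > slow_threshold_s:
            # The exact SQL that ran is available for the warning log
            logger.warning(f">>>>>>>>>{label} long query ({int(time() - started)}s)<<<<<<<<<")
            logger.warning(formatted)
        return rows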
api/chalicelib/core/metrics/product_anaytics2.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+from chalicelib.utils.ch_client import ClickHouseClient
+
+
+def search_events(project_id: int, data: dict):
+    with ClickHouseClient() as ch_client:
+        r = ch_client.format(
+            """SELECT *
+               FROM taha.events
+               WHERE project_id=%(project_id)s
+               ORDER BY created_at;""",
+            params={"project_id": project_id})
+        x = ch_client.execute(r)
+
+    return x
api/chalicelib/core/modules/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
+TENANT_CONDITION = "TRUE"
+MOB_KEY = ""
+
+
+def get_file_key(project_id, session_id):
+    return {}
@@ -1,6 +1,7 @@
 import json
-from typing import Optional, List
+import logging
 from collections import Counter
+from typing import Optional, List

 from fastapi import HTTPException, status

@@ -9,6 +10,8 @@ from chalicelib.core import users
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC

+logger = logging.getLogger(__name__)


 def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool:
 with pg_client.PostgresClient() as cur:

@@ -410,7 +413,6 @@ def update_project_conditions(project_id, conditions):
 create_project_conditions(project_id, to_be_created)

 if to_be_updated:
-print(to_be_updated)
 update_project_condition(project_id, to_be_updated)

 return get_conditions(project_id)

@@ -425,3 +427,45 @@ def get_projects_ids(tenant_id):
 cur.execute(query=query)
 rows = cur.fetchall()
 return [r["project_id"] for r in rows]


+def delete_metadata_condition(project_id, metadata_key):
+sql = """\
+UPDATE public.projects_conditions
+SET filters=(SELECT COALESCE(jsonb_agg(elem), '[]'::jsonb)
+FROM jsonb_array_elements(filters) AS elem
+WHERE NOT (elem ->> 'type' = 'metadata'
+AND elem ->> 'source' = %(metadata_key)s))
+WHERE project_id = %(project_id)s
+AND jsonb_typeof(filters) = 'array'
+AND EXISTS (SELECT 1
+FROM jsonb_array_elements(filters) AS elem
+WHERE elem ->> 'type' = 'metadata'
+AND elem ->> 'source' = %(metadata_key)s);"""

+with pg_client.PostgresClient() as cur:
+query = cur.mogrify(sql, {"project_id": project_id, "metadata_key": metadata_key})
+cur.execute(query)


+def rename_metadata_condition(project_id, old_metadata_key, new_metadata_key):
+sql = """\
+UPDATE public.projects_conditions
+SET filters = (SELECT jsonb_agg(CASE
+WHEN elem ->> 'type' = 'metadata' AND elem ->> 'source' = %(old_metadata_key)s
+THEN elem || ('{"source": "'||%(new_metadata_key)s||'"}')::jsonb
+ELSE elem END)
+FROM jsonb_array_elements(filters) AS elem)
+WHERE project_id = %(project_id)s
+AND jsonb_typeof(filters) = 'array'
+AND EXISTS (SELECT 1
+FROM jsonb_array_elements(filters) AS elem
+WHERE elem ->> 'type' = 'metadata'
+AND elem ->> 'source' = %(old_metadata_key)s);"""

+with pg_client.PostgresClient() as cur:
+query = cur.mogrify(sql, {"project_id": project_id, "old_metadata_key": old_metadata_key,
+"new_metadata_key": new_metadata_key})
+cur.execute(query)

+# TODO: make project conditions use metadata-column-name instead of metadata-key

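The two helpers added above appear to keep the saved filters JSONB in public.projects_conditions in sync when a metadata key is deleted or renamed. A minimal usage sketch under that assumption (the project id and key names below are made up for illustration):

    # After dropping the "plan" metadata key from a project:
    delete_metadata_condition(project_id=42, metadata_key="plan")

    # After renaming "plan" to "subscription_plan":
    rename_metadata_condition(project_id=42, old_metadata_key="plan",
                              new_metadata_key="subscription_plan")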
@@ -14,7 +14,7 @@ def reset(data: schemas.ForgetPasswordPayloadSchema, background_tasks: Backgroun
 if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
 return {"errors": ["Invalid captcha."]}
 if not smtp.has_smtp():
-return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]}
+return {"errors": ["Email delivery failed due to invalid SMTP configuration. Please contact your admin."]}
 a_user = users.get_by_email_only(data.email)
 if a_user:
 invitation_link = users.generate_new_invitation(user_id=a_user["userId"])

api/chalicelib/core/sessions/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+import logging
+
+from decouple import config
+
+logger = logging.getLogger(__name__)
+from . import sessions_pg
+from . import sessions_pg as sessions_legacy
+from . import sessions_ch
+
+if config("EXP_METRICS", cast=bool, default=False):
+    from . import sessions_ch as sessions
+else:
+    from . import sessions_pg as sessions
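This new package chooses the sessions backend at import time via the EXP_METRICS flag. A minimal sketch of how a caller would consume the alias, assuming only the names exported by the __init__.py above:

    # Resolves to sessions_ch when EXP_METRICS is true, otherwise sessions_pg
    from chalicelib.core.sessions import sessions

    # Always the PostgreSQL implementation, regardless of the flag
    from chalicelib.core.sessions import sessions_legacy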
@@ -1,10 +1,12 @@
-from decouple import config
-from chalicelib.utils import helper
-from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.utils import pg_client
-from chalicelib.core import integrations_manager, integration_base_issue
 import json

+from decouple import config
+
+from chalicelib.core.issue_tracking import integrations_manager, base_issue
+from chalicelib.utils import helper
+from chalicelib.utils import pg_client
+from chalicelib.utils.TimeUTC import TimeUTC


 def __get_saved_data(project_id, session_id, issue_id, tool):
 with pg_client.PostgresClient() as cur:

@@ -39,8 +41,8 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne
 issue = integration.issue_handler.create_new_assignment(title=title, assignee=assignee, description=description,
 issue_type=issue_type,
 integration_project_id=integration_project_id)
-except integration_base_issue.RequestException as e:
-return integration_base_issue.proxy_issues_handler(e)
+except base_issue.RequestException as e:
+return base_issue.proxy_issues_handler(e)
 if issue is None or "id" not in issue:
 return {"errors": ["something went wrong while creating the issue"]}
 with pg_client.PostgresClient() as cur:
api/chalicelib/core/sessions/sessions_ch.py (new file, 1559 lines)
File diff suppressed because it is too large
@@ -0,0 +1 @@
+from .sessions_devtool import *
@@ -1,9 +1,10 @@
 from decouple import config

+import schemas
 from chalicelib.utils.storage import StorageClient


-def __get_devtools_keys(project_id, session_id):
+def get_devtools_keys(project_id, session_id):
 params = {
 "sessionId": session_id,
 "projectId": project_id

@@ -13,9 +14,9 @@ def __get_devtools_keys(project_id, session_id):
 ]


-def get_urls(session_id, project_id, check_existence: bool = True):
+def get_urls(session_id, project_id, context: schemas.CurrentContext, check_existence: bool = True):
 results = []
-for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
+for k in get_devtools_keys(project_id=project_id, session_id=session_id):
 if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
 continue
 results.append(StorageClient.get_presigned_url_for_sharing(

@@ -28,5 +29,5 @@ def get_urls(session_id, project_id, check_existence: bool = True):

 def delete_mobs(project_id, session_ids):
 for session_id in session_ids:
-for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
+for k in get_devtools_keys(project_id=project_id, session_id=session_id):
 StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)

@@ -0,0 +1 @@
+from .sessions_favorite import *
Some files were not shown because too many files have changed in this diff.