Compare commits: api-pa-eve ... main (603 commits)
Commit list (SHA1 only): 90510aa33b … 954e811be0
2683 changed files with 101916 additions and 57688 deletions
.github/workflows/api-ee.yaml (vendored): 2 changed lines

```
@@ -10,8 +10,6 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
- actions_test
paths:
- "ee/api/**"
- "api/**"
```
.github/workflows/api.yaml (vendored): 1 changed line

```
@@ -10,7 +10,6 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
paths:
- "api/**"
- "!api/.gitignore"
```
.github/workflows/assist-ee.yaml (vendored): 1 changed line

```
@@ -9,7 +9,6 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/assist/**"
- "assist/**"
```
```
@@ -1,4 +1,4 @@
# This action will push the peers changes to aws
# This action will push the assist changes to aws
on:
workflow_dispatch:
inputs:

@@ -9,14 +9,10 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/peers/**"
- "peers/**"
- "!peers/.gitignore"
- "!peers/*-dev.sh"
- "ee/assist-server/**"
name: Build and Deploy Peers EE
name: Build and Deploy Assist-Server EE
jobs:
deploy:

@@ -57,12 +53,7 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing peers image
- name: Building and Pushing Assist-Server image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}

@@ -70,11 +61,11 @@ jobs:
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd peers
cd assist-server
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("peers")
images=("assist-server")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done

@@ -85,7 +76,7 @@ jobs:
} && {
echo "Skipping Security Checks"
}
images=("peers")
images=("assist-server")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done

@@ -109,43 +100,23 @@ jobs:
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
pwd
cd scripts/helmcharts/
# Update changed image tag
sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true
```
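The deploy step above patches /tmp/image_override.yaml with a line-counting sed: it finds the chart key, skips three lines, and rewrites the tag line. A minimal sketch of that pattern against a hypothetical override file:

```bash
#!/usr/bin/env bash
# Hypothetical override file: chart key, "image:", "repository:", then the "tag:" line.
cat > /tmp/image_override.yaml <<'EOF'
assist-server:
  image:
    repository: foss/assist-server
    tag: old-tag
EOF

IMAGE_TAG="dev_abc123"   # placeholder tag
# /assist-server/ matches the key; n;n;n advances three lines; s/.*/.../ rewrites the tag line.
sed -i "/assist-server/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
```

The sed approach is fragile because it depends on the exact line layout under the chart key, which is why other workflows in this diff move to yq.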
.github/workflows/assist-stats.yaml (vendored): 5 changed lines

```
@@ -15,7 +15,7 @@ on:
- "!assist-stats/*-dev.sh"
- "!assist-stats/requirements-*.txt"
name: Build and Deploy Assist Stats
name: Build and Deploy Assist Stats ee
jobs:
deploy:

@@ -123,8 +123,9 @@ jobs:
tag: ${IMAGE_TAG}
EOF
export IMAGE_TAG=${IMAGE_TAG}
# Update changed image tag
sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
```
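The second hunk replaces the sed edit with yq, writing the tag into a structured path after exporting IMAGE_TAG. A small sketch of that approach (mikefarah yq v4 syntax, hypothetical file and tag):

```bash
#!/usr/bin/env bash
cat > /tmp/image_override.yaml <<'EOF'
utilities:
  apiCrons:
    assiststats:
      image:
        tag: old-tag
EOF

export IMAGE_TAG="dev_abc123"   # must be exported so strenv() can see it
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
yq '.utilities.apiCrons.assiststats.image.tag' /tmp/image_override.yaml   # -> dev_abc123
```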
.github/workflows/assist.yaml (vendored): 1 changed line

```
@@ -9,7 +9,6 @@ on:
push:
branches:
- dev
- api-*
paths:
- "assist/**"
- "!assist/.gitignore"
```
.github/workflows/crons-ee.yaml (vendored): 42 changed lines

```
@@ -10,7 +10,6 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
paths:
- "ee/api/**"
- "api/**"

@@ -101,33 +100,32 @@ jobs:
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
env:
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
image: &image
tag: "${IMAGE_TAG}"
utilities:
apiCrons:
assiststats:
image: *image
report:
image: *image
sessionsCleaner:
image: *image
projectsStats:
image: *image
fixProjectsStats:
image: *image
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
# Update changed image tag
sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts

@@ -137,8 +135,6 @@ jobs:
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
```
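The rewritten "Creating old image input" step emits a YAML anchor (&image) that every cron sub-chart references with *image, so a single tag fans out to all the cron images. A sketch of the generated file and a quick check that the alias resolves (tag value is a placeholder):

```bash
#!/usr/bin/env bash
IMAGE_TAG="v1.22.0_abc123"
cat <<EOF >/tmp/image_override.yaml
image: &image
  tag: "${IMAGE_TAG}"
utilities:
  apiCrons:
    assiststats:
      image: *image
    report:
      image: *image
EOF

# explode(.) expands anchors/aliases, so every cron reports the same tag.
yq 'explode(.) | .utilities.apiCrons.report.image.tag' /tmp/image_override.yaml   # -> v1.22.0_abc123
```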
.github/workflows/patch-build-old.yaml (vendored, new file): 189 added lines

```
@@ -0,0 +1,189 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
required: true
default: 'chalice,frontend'
tag:
description: 'Tag to update.'
required: true
type: string
branch:
description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
required: true
type: string
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
jobs:
deploy:
name: Build Patch from old tag
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 4
ref: ${{ github.event.inputs.tag }}
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Create backup tag with timestamp
run: |
set -e # Exit immediately if a command exits with a non-zero status
TIMESTAMP=$(date +%Y%m%d%H%M%S)
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
echo "Created backup tag: $BACKUP_TAG"
# Get the oldest commit date from the last 3 commits in raw format
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
# Add 1 second to the timestamp
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
- name: Setup yq
uses: mikefarah/yq@master
# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
- name: Build
id: build-image
env:
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
cd openreplay && git fetch origin && git checkout $INPUT_TAG
git log -1
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
BUILD_SCRIPT_NAME="build.sh"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
done
- name: Change commit timestamp
run: |
# Convert the timestamp to a date format git can understand
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
echo "Setting commit date to: $NEW_DATE"
# Amend the commit with the new date
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
# Verify the change
git log -1 --pretty=format:"Commit now dated: %cD"
# git tag and push
git tag $INPUT_TAG -f
git push origin $INPUT_TAG -f
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
# MSAAS_REPO_FOLDER: /tmp/msaas
# with:
# limit-access-to-actor: true
```
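The timestamp trick in this new workflow (back up the tag, take the oldest of the last three commit dates, add one second, amend HEAD, then move the tag) can be rehearsed in a throwaway clone before trusting it in CI. A hedged sketch of the same sequence with a placeholder tag:

```bash
#!/usr/bin/env bash
set -e
TAG="v1.2.3"                                        # placeholder tag
BACKUP_TAG="${TAG}-backup-$(date +%Y%m%d%H%M%S)"
git tag "$BACKUP_TAG"                               # escape hatch before rewriting anything

OLDEST=$(git log -3 --pretty=format:"%at" | tail -1)    # unix time of the 3rd-newest commit
NEW_TS=$((OLDEST + 1))
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' "$NEW_TS")

# Rewrite both author and committer dates of HEAD, then move the tag onto it.
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
git tag -f "$TAG"
git log -1 --pretty=format:"Commit now dated: %cD"
```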
.github/workflows/patch-build.yaml (vendored): 246 changed lines
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma separated names of services to build(in small letters).'
|
||||
|
|
@ -20,12 +19,20 @@ jobs:
|
|||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Rebase with main branch, to make sure the code has latest main changes
|
||||
if: github.ref != 'refs/heads/main'
|
||||
run: |
|
||||
git pull --rebase origin main
|
||||
git remote -v
|
||||
git config --global user.email "action@github.com"
|
||||
git config --global user.name "GitHub Action"
|
||||
git config --global rebase.autoStash true
|
||||
git fetch origin main:main
|
||||
git rebase main
|
||||
git log -3
|
||||
|
||||
- name: Downloading yq
|
||||
run: |
|
||||
|
|
@ -48,6 +55,8 @@ jobs:
|
|||
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
- name: Get HEAD Commit ID
|
||||
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
|
||||
- name: Define Branch Name
|
||||
|
|
@ -65,78 +74,168 @@ jobs:
|
|||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
SERVICES_INPUT: ${{ github.event.inputs.services }}
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b $BRANCH_NAME
|
||||
working_dir=$(pwd)
|
||||
function image_version(){
|
||||
local service=$1
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
current_version=$(yq eval '.AppVersion' $chart_path)
|
||||
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
|
||||
echo $new_version
|
||||
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
readonly WORKING_DIR=$(pwd)
|
||||
readonly BUILD_SCRIPT_NAME="build.sh"
|
||||
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
|
||||
|
||||
# Initialize git configuration
|
||||
setup_git() {
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
}
|
||||
function clone_msaas() {
|
||||
[ -d $MSAAS_REPO_FOLDER ] || {
|
||||
git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
|
||||
git log -1
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
}
|
||||
|
||||
# Get and increment image version
|
||||
image_version() {
|
||||
local service=$1
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
local current_version new_version
|
||||
|
||||
current_version=$(yq eval '.AppVersion' "$chart_path")
|
||||
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
|
||||
echo "$new_version"
|
||||
}
|
||||
function build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
echo building managed
|
||||
clone_msaas
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/api
|
||||
else
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/$service
|
||||
fi
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
|
||||
|
||||
# Clone MSAAS repository if not exists
|
||||
clone_msaas() {
|
||||
if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
|
||||
git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
cd openreplay && git fetch origin && git checkout main
|
||||
git log -1
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
fi
|
||||
}
|
||||
# Checking for backend images
|
||||
ls backend/cmd >> /tmp/backend.txt
|
||||
echo Services: "${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
BUILD_SCRIPT_NAME="build.sh"
|
||||
# Build FOSS
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
# Check if service is backend
|
||||
if grep -q $SERVICE /tmp/backend.txt; then
|
||||
cd backend
|
||||
foss_build_args="nil $SERVICE"
|
||||
ee_build_args="ee $SERVICE"
|
||||
else
|
||||
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
|
||||
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
version=$(image_version $SERVICE)
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
else
|
||||
build_managed $SERVICE $version
|
||||
fi
|
||||
cd $working_dir
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
|
||||
yq eval ".AppVersion = \"$version\"" -i $chart_path
|
||||
git add $chart_path
|
||||
git commit -m "Increment $SERVICE chart version"
|
||||
git push --set-upstream origin $BRANCH_NAME
|
||||
done
|
||||
|
||||
# Build managed services
|
||||
build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
|
||||
echo "Building managed service: $service"
|
||||
clone_msaas
|
||||
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/api"
|
||||
else
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/$service"
|
||||
fi
|
||||
|
||||
local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
|
||||
|
||||
echo "Executing: $build_cmd"
|
||||
if ! eval "$build_cmd" 2>&1; then
|
||||
echo "Build failed for $service"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Build service with given arguments
|
||||
build_service() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local build_args=$3
|
||||
local build_script=${4:-$BUILD_SCRIPT_NAME}
|
||||
|
||||
local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
|
||||
echo "Executing: $command"
|
||||
eval "$command"
|
||||
}
|
||||
|
||||
# Update chart version and commit changes
|
||||
update_chart_version() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
|
||||
# Ensure we're in the original working directory/repository
|
||||
cd "$WORKING_DIR"
|
||||
yq eval ".AppVersion = \"$version\"" -i "$chart_path"
|
||||
git add "$chart_path"
|
||||
git commit -m "Increment $service chart version to $version"
|
||||
git push --set-upstream origin "$BRANCH_NAME"
|
||||
cd -
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
setup_git
|
||||
|
||||
# Get backend services list
|
||||
ls backend/cmd >"$BACKEND_SERVICES_FILE"
|
||||
|
||||
# Parse services input (fix for GitHub Actions syntax)
|
||||
echo "Services: ${SERVICES_INPUT:-$1}"
|
||||
IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
|
||||
|
||||
# Process each service
|
||||
for service in "${services[@]}"; do
|
||||
echo "Processing service: $service"
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
|
||||
|
||||
# Determine build configuration based on service type
|
||||
if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
|
||||
# Backend service
|
||||
cd backend
|
||||
foss_build_args="nil $service"
|
||||
ee_build_args="ee $service"
|
||||
else
|
||||
# Non-backend service
|
||||
case "$service" in
|
||||
chalice | alerts | crons)
|
||||
cd "$WORKING_DIR/api"
|
||||
;;
|
||||
*)
|
||||
cd "$service"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Special build scripts for alerts/crons
|
||||
if [[ $service == 'alerts' || $service == 'crons' ]]; then
|
||||
build_script="build_${service}.sh"
|
||||
fi
|
||||
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
|
||||
# Get version and build
|
||||
local version
|
||||
version=$(image_version "$service")
|
||||
|
||||
# Build FOSS and EE versions
|
||||
build_service "$service" "$version" "$foss_build_args"
|
||||
build_service "$service" "${version}-ee" "$ee_build_args"
|
||||
|
||||
# Build managed version for specific services
|
||||
if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
|
||||
echo "Nothing to build in managed for service $service"
|
||||
else
|
||||
build_managed "$service" "$version"
|
||||
fi
|
||||
|
||||
# Update chart and commit
|
||||
update_chart_version "$service" "$version"
|
||||
done
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
# Cleanup
|
||||
rm -f "$BACKEND_SERVICES_FILE"
|
||||
}
|
||||
|
||||
echo "Working directory: $WORKING_DIR"
|
||||
# Run main function with all arguments
|
||||
main "$SERVICES_INPUT"
|
||||
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: repo-sync/pull-request@v2
|
||||
|
|
@ -147,8 +246,7 @@ jobs:
|
|||
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
|
||||
pr_body: |
|
||||
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
|
||||
Once this PR is merged, To update the latest tag, run the following workflow.
|
||||
https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
|
||||
Once this PR is merged, tag update job will run automatically.
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
|
|
|
|||
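Both patch-build workflows bump a chart's AppVersion by incrementing its last numeric field with awk before rebuilding and committing the chart. A standalone sketch of that bump and how it is written back (the chart path is hypothetical):

```bash
#!/usr/bin/env bash
image_version() {
  local chart_path=$1
  local current new
  current=$(yq eval '.AppVersion' "$chart_path")                      # e.g. 1.22.3
  new=$(echo "$current" | awk -F. '{$NF += 1; print $1"."$2"."$3}')   # -> 1.22.4
  echo "$new"
}

chart="scripts/helmcharts/openreplay/charts/chalice/Chart.yaml"   # hypothetical path
v=$(image_version "$chart")
yq eval ".AppVersion = \"$v\"" -i "$chart"   # persist the bumped version
```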
.github/workflows/peers.yaml (vendored): 149 lines removed (file deleted)
|
|
@ -1,149 +0,0 @@
|
|||
# This action will push the peers changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
required: false
|
||||
default: "false"
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- api-*
|
||||
paths:
|
||||
- "peers/**"
|
||||
- "!peers/.gitignore"
|
||||
- "!peers/*-dev.sh"
|
||||
|
||||
name: Build and Deploy Peers
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing peers image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd peers
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("peers")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("peers")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: foss
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
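The deleted workflow's "Creating old image input" step illustrates a reusable pattern: snapshot the image tags currently running in the cluster before deploying, then turn them into helm overrides. A sketch of the same idea, using a while/printf loop in place of the heredoc (namespace and registry filter are taken from the workflow; treat them as placeholders elsewhere):

```bash
#!/usr/bin/env bash
# List every container image in the namespace, one per line, with duplicate counts.
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[[:space:]]' '\n' | sort | uniq -c |
  grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt   # keep only "<name>:<tag>" entries

# Turn each "name:tag" pair into a helm override entry.
echo > /tmp/image_override.yaml
while read -r line; do
  name=${line%%:*}; tag=${line#*:}
  printf '%s:\n  image:\n    tag: %s\n' "$name" "$tag" >> /tmp/image_override.yaml
done < /tmp/image_tag.txt
```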
.github/workflows/release-deployment.yaml (vendored, new file): 103 added lines

```
@@ -0,0 +1,103 @@
name: Release Deployment
on:
workflow_dispatch:
inputs:
services:
description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
required: true
branch:
description: 'Branch to deploy (defaults to dev)'
required: false
default: 'dev'
env:
IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch }}
- name: Docker login
run: |
docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- name: Set image tag with branch info
run: |
SHORT_SHA=$(git rev-parse --short HEAD)
echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
echo "Using image tag: $IMAGE_TAG"
- uses: depot/setup-action@v1
- name: Build and push Docker images
run: |
# Parse the comma-separated services list into an array
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
working_dir=$(pwd)
# Define backend services (consider moving this to workflow inputs or repo config)
ls backend/cmd >> /tmp/backend.txt
BUILD_SCRIPT_NAME="build.sh"
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd $working_dir/backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
cd $working_dir
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
{
echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
}&
{
echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
}&
done
wait
- uses: azure/k8s-set-context@v1
name: Using ee release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}
- name: Deploy to ee release Kubernetes
run: |
echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
done
- uses: azure/k8s-set-context@v1
name: Using foss release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}
- name: Deploy to FOSS release Kubernetes
run: |
echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
done
```
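The build step in this new workflow runs the FOSS and EE builds of each service as background jobs with `{ ... } &` and then blocks on `wait`. A reduced sketch of that shell pattern with stand-in build commands:

```bash
#!/usr/bin/env bash
build() {   # stand-in for "bash build.sh ..." with the IMAGE_TAG/ARCH environment
  echo "building $1"; sleep 2; echo "done $1"
}

for SERVICE in frontend chalice sink; do
  { build "$SERVICE (foss)"; } &
  { build "$SERVICE (ee)"; } &
done
wait   # block until every background build has finished
echo "all builds finished"
# Note: a plain "wait" does not propagate non-zero exits from the jobs; check job
# status explicitly (or use "wait -n" in a loop) if a failed build should fail the step.
```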
.github/workflows/sourcemaps-reader-ee.yaml (vendored): 10 changed lines

```
@@ -1,4 +1,4 @@
# This action will push the sourcemapreader changes to aws
# This action will push the sourcemapreader changes to ee
on:
workflow_dispatch:
inputs:

@@ -9,13 +9,13 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/sourcemap-reader/**"
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"
- "!sourcemap-reader/*-dev.sh"
name: Build and Deploy sourcemap-reader
name: Build and Deploy sourcemap-reader EE
jobs:
deploy:

@@ -64,7 +64,7 @@ jobs:
- name: Building and Pushing sourcemaps-reader image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |

@@ -132,7 +132,7 @@ jobs:
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
```
.github/workflows/sourcemaps-reader.yaml (vendored): 1 changed line

```
@@ -9,7 +9,6 @@ on:
push:
branches:
- dev
- api-*
paths:
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"
```
.github/workflows/update-tag.yaml (vendored): 47 changed lines

```
@@ -1,35 +1,42 @@
on:
workflow_dispatch:
description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
inputs:
services:
description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
required: true
default: "false"
name: Force Push tag with main branch HEAD
pull_request:
types: [closed]
branches:
- main
name: Release tag update --force
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Get latest release tag using GitHub API
id: get-latest-tag
run: |
LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
| jq -r .tag_name)
# Fallback to git command if API doesn't return a tag
if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
echo "Not found latest tag"
exit 100
fi
echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
echo "Latest tag: $LATEST_TAG"
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
- name: Push main branch to tag
run: |
git fetch --tags
git checkout main
git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true
echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
```
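The rewritten update-tag job now resolves the latest release through the GitHub API instead of sorting local tags, then force-moves that tag to the current main HEAD. A sketch of the same lookup and push outside Actions (repository name and token are placeholders):

```bash
#!/usr/bin/env bash
REPO="openreplay/openreplay"   # placeholder; any owner/repo works
LATEST_TAG=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \
  "https://api.github.com/repos/$REPO/releases/latest" | jq -r .tag_name)

if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
  echo "Not found latest tag"; exit 100
fi

git fetch --tags
git checkout main
# Point the release tag at the current main HEAD (anything previously on the tag is overwritten).
git push origin "HEAD:refs/tags/$LATEST_TAG" --force
```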
LICENSE: 2 changed lines

```
@@ -1,4 +1,4 @@
Copyright (c) 2021-2024 Asayer, Inc dba OpenReplay
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay
OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
```
```
@@ -1,10 +1,17 @@
FROM python:3.12-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
FROM python:3.12-alpine AS builder
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk add --no-cache build-base tini
RUN apk add --no-cache build-base
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv && \
export UV_SYSTEM_PYTHON=true && \
uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
uv pip install --no-cache-dir --upgrade -r requirements.txt
FROM python:3.12-alpine
ARG GIT_SHA
ARG envarg
# Add Tini
# Startup daemon

@@ -14,19 +21,11 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
COPY . .
RUN mv env.default .env
RUN adduser -u 1001 openreplay -D
USER 1001
RUN apk add --no-cache tini && mv env.default .env
ENTRYPOINT ["/sbin/tini", "--"]
CMD ./entrypoint.sh
CMD ["./entrypoint.sh"]
```
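The revised Dockerfile above takes GIT_SHA and envarg as build arguments and does its dependency install with uv in a separate builder stage. A hedged sketch of how such an image might be built and inspected locally (the image tag and the "ee" value are assumptions, not values from this diff):

```bash
#!/usr/bin/env bash
set -e
GIT_SHA=$(git rev-parse HEAD)

# envarg feeds ENTERPRISE_BUILD inside the image; "ee" here is only an example.
docker build \
  --build-arg GIT_SHA="$GIT_SHA" \
  --build-arg envarg=ee \
  -t service-image:local .   # hypothetical tag; run from the service directory

# tini is the entrypoint, so overriding CMD with "env" prints the baked-in variables.
docker run --rm service-image:local env | grep -E 'GIT_SHA|ENTERPRISE_BUILD'
```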
api/Pipfile: 21 changed lines

```
@@ -4,25 +4,26 @@ verify_ssl = true
name = "pypi"
[packages]
urllib3 = "==2.2.3"
urllib3 = "==2.3.0"
requests = "==2.32.3"
boto3 = "==1.35.86"
boto3 = "==1.36.12"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
clickhouse-connect = "==0.8.11"
elasticsearch = "==8.17.0"
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.15"
elasticsearch = "==8.17.1"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.6"
cachetools = "==5.5.1"
fastapi = "==0.115.8"
uvicorn = {extras = ["standard"], version = "==0.34.0"}
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.6"}
apscheduler = "==3.11.0"
redis = "==5.2.1"
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
uvicorn = {extras = ["standard"], version = "==0.34.0"}
pydantic = {extras = ["email"], version = "==2.10.4"}
[dev-packages]
[requires]
python_version = "3.12"
python_full_version = "3.12.8"
```
|
|
@@ -12,7 +12,7 @@ from chalicelib.utils import pg_client
@asynccontextmanager
async def lifespan(app: FastAPI):
# Startup
logging.info(">>>>> starting up <<<<<")
ap_logger.info(">>>>> starting up <<<<<")
await pg_client.init()
app.schedule.start()
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",

@@ -27,14 +27,22 @@ async def lifespan(app: FastAPI):
yield

# Shutdown
logging.info(">>>>> shutting down <<<<<")
ap_logger.info(">>>>> shutting down <<<<<")
app.schedule.shutdown(wait=False)
await pg_client.terminate()

loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)

app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
redoc_url=config("redoc_url", default=""), lifespan=lifespan)
logging.info("============= ALERTS =============")

app.schedule = AsyncIOScheduler()
ap_logger.info("============= ALERTS =============")

@app.get("/")

@@ -50,17 +58,8 @@ async def get_health_status():
}}

app.schedule = AsyncIOScheduler()

loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()

if config("LOCAL_DEV", default=False, cast=bool):
@app.get('/trigger', tags=["private"])
async def trigger_main_cron():
logging.info("Triggering main cron")
ap_logger.info("Triggering main cron")
alerts_processor.process()
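The hunks above move logging and scheduler setup ahead of the FastAPI app object and keep APScheduler start/stop inside the lifespan context. A minimal, self-contained sketch of that pattern follows; the job name, interval and app.state usage are illustrative assumptions, not the repository's actual module.

# Sketch only: configure logging and the scheduler, then start/stop the scheduler in lifespan.
# Run with e.g. `uvicorn sketch:app`; the job below is a placeholder.
import logging
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI

logging.basicConfig(level=logging.INFO)
ap_logger = logging.getLogger("apscheduler")


def process():
    ap_logger.info("running scheduled job")


@asynccontextmanager
async def lifespan(app: FastAPI):
    ap_logger.info(">>>>> starting up <<<<<")
    app.state.schedule.start()
    app.state.schedule.add_job(id="demo_job", func=process, trigger="interval", seconds=60)
    yield
    ap_logger.info(">>>>> shutting down <<<<<")
    app.state.schedule.shutdown(wait=False)


app = FastAPI(lifespan=lifespan)
app.state.schedule = AsyncIOScheduler()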
@@ -4,7 +4,8 @@ from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import sessions, alert_helpers
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_pg as sessions
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

@@ -131,6 +132,7 @@ def Build(a):

def process():
logger.info("> processing alerts on PG")
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur:
@@ -3,10 +3,11 @@ import logging
from pydantic_core._pydantic_core import ValidationError

import schemas
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import sessions, alert_helpers
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_ch as sessions

logger = logging.getLogger(__name__)

@@ -155,6 +156,7 @@ def Build(a):

def process():
logger.info("> processing alerts on CH")
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:

@@ -164,7 +166,7 @@ def process():
if alert_helpers.can_check(alert):
query, params = Build(alert)
try:
query = ch_cur.format(query, params)
query = ch_cur.format(query=query, parameters=params)
except Exception as e:
logger.error(
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")

@@ -173,7 +175,7 @@ def process():
logger.debug(alert)
logger.debug(query)
try:
result = ch_cur.execute(query)
result = ch_cur.execute(query=query)
if len(result) > 0:
result = result[0]
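The change above switches the ClickHouse calls to keyword arguments: ch_cur.format(query=..., parameters=...) and ch_cur.execute(query=...). The hypothetical wrapper below only illustrates why keyword-only parameters make those call sites explicit; it is not OpenReplay's ch_client, and the stub driver is invented for the example.

from typing import Any, Sequence


class _StubDriver:
    """Stand-in for a real ClickHouse driver so the sketch runs on its own."""

    def render(self, query: str, parameters: dict[str, Any]) -> str:
        return query % {k: repr(v) for k, v in parameters.items()}

    def run(self, query: str) -> Sequence[dict]:
        return [{"value": 24}]


class ClickHouseCursor:
    def __init__(self, driver: Any):
        self._driver = driver

    def format(self, *, query: str, parameters: dict[str, Any]) -> str:
        # Keyword-only arguments keep call sites self-describing: format(query=..., parameters=...)
        return self._driver.render(query, parameters)

    def execute(self, *, query: str) -> Sequence[dict]:
        return self._driver.run(query)


ch_cur = ClickHouseCursor(_StubDriver())
sql = ch_cur.format(query="SELECT %(x)s AS value", parameters={"x": 24})
print(ch_cur.execute(query=sql))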
@@ -1,9 +1,3 @@
from decouple import config

TENANT_ID = "-1"
if config("EXP_ALERTS", cast=bool, default=False):
from chalicelib.core.sessions import sessions_ch as sessions
else:
from chalicelib.core.sessions import sessions

from . import helpers as alert_helpers
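The block removed above implemented the EXP_* flag pattern that other packages in this changeset still use (EXP_ERRORS_SEARCH, EXP_AUTOCOMPLETE, EXP_METRICS): a boolean env var picks which backend module a package re-exports. A runnable sketch of that pattern follows; json/pickle stand in for the real backend modules, which are placeholders here.

import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_BACKEND", cast=bool, default=False):
    logger.info(">>> Using experimental backend")
    import json as sessions_backend  # stand-in for a ClickHouse-backed module
else:
    import pickle as sessions_backend  # stand-in for a PostgreSQL-backed module

print(sessions_backend.__name__)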
@@ -37,8 +37,7 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
logger.debug("! JWT Expired signature")
return None
except BaseException as e:
logger.warning("! JWT Base Exception")
logger.debug(e)
logger.warning("! JWT Base Exception", exc_info=e)
return None
return payload

@@ -56,8 +55,7 @@ def jwt_refresh_authorizer(scheme: str, token: str):
logger.debug("! JWT-refresh Expired signature")
return None
except BaseException as e:
logger.warning("! JWT-refresh Base Exception")
logger.debug(e)
logger.error("! JWT-refresh Base Exception", exc_info=e)
return None
return payload
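The two hunks above replace a separate logger.debug(e) with exc_info=e on the warning/error call, so the traceback lands on the same log record. A small standalone sketch, with a placeholder exception in place of the real jwt.decode() call:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("auth")


def parse_token(token: str) -> dict | None:
    try:
        raise ValueError("bad token")  # placeholder for jwt.decode(...)
    except Exception as e:
        # exc_info=e attaches the full traceback to this single warning record
        logger.warning("! JWT Base Exception", exc_info=e)
        return None


parse_token("x")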
@@ -1,11 +0,0 @@
import logging

from decouple import config

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))

if config("EXP_AUTOCOMPLETE", cast=bool, default=False):
logging.info(">>> Using experimental autocomplete")
from . import autocomplete_ch as autocomplete
else:
from . import autocomplete

@@ -85,7 +85,8 @@ def __generic_query(typename, value_length=None):
ORDER BY value"""

if value_length is None or value_length > 2:
return f"""(SELECT DISTINCT value, type
return f"""SELECT DISTINCT ON(value,type) value, type
((SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s

@@ -101,7 +102,7 @@ def __generic_query(typename, value_length=None):
AND type='{typename.upper()}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5);"""
LIMIT 5)) AS raw;"""
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE

@@ -124,7 +125,7 @@ def __generic_autocomplete(event: Event):
return f

def __generic_autocomplete_metas(typename):
def generic_autocomplete_metas(typename):
def f(project_id, text):
with pg_client.PostgresClient() as cur:
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),

@@ -326,7 +327,7 @@ def __search_metadata(project_id, value, key=None, source=None):
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
with pg_client.PostgresClient() as cur:
cur.execute(cur.mogrify(f"""\
SELECT key, value, 'METADATA' AS TYPE
SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}))
@@ -13,15 +13,18 @@ def get_state(tenant_id):

if len(pids) > 0:
cur.execute(
cur.mogrify("""SELECT EXISTS(( SELECT 1
cur.mogrify(
"""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
{"ids": tuple(pids)})
{"ids": tuple(pids)},
)
)
recorded = cur.fetchone()["exists"]
meta = False
if recorded:
query = cur.mogrify("""SELECT EXISTS((SELECT 1
query = cur.mogrify(
f"""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions

@@ -36,26 +39,35 @@ def get_state(tenant_id):
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
{"tenant_id": tenant_id})
{"tenant_id": tenant_id},
)
cur.execute(query)

meta = cur.fetchone()["exists"]

return [
{"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
{"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
{"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users"},
{"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations"}
{
"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start",
},
{
"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
},
{
"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users",
},
{
"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
or len(sentry.get_all(tenant_id=tenant_id)) > 0
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations",
},
]

@@ -66,21 +78,26 @@ def get_state_installing(tenant_id):

if len(pids) > 0:
cur.execute(
cur.mogrify("""SELECT EXISTS(( SELECT 1
cur.mogrify(
"""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
{"ids": tuple(pids)})
{"ids": tuple(pids)},
)
)
recorded = cur.fetchone()["exists"]

return {"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start"}
return {
"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start",
}

def get_state_identify_users(tenant_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
query = cur.mogrify(
f"""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions

@@ -95,25 +112,32 @@ def get_state_identify_users(tenant_id):
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
{"tenant_id": tenant_id})
{"tenant_id": tenant_id},
)
cur.execute(query)

meta = cur.fetchone()["exists"]

return {"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
return {
"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
}

def get_state_manage_users(tenant_id):
return {"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users"}
return {
"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users",
}

def get_state_integrations(tenant_id):
return {"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations"}
return {
"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
or len(sentry.get_all(tenant_id=tenant_id)) > 0
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations",
}
@@ -4,10 +4,10 @@ from decouple import config

logger = logging.getLogger(__name__)

from . import errors as errors_legacy
from . import errors_pg as errors_legacy

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental error search")
from . import errors_ch as errors
else:
from . import errors
from . import errors_pg as errors
@@ -1,11 +1,11 @@
import schemas
from chalicelib.core import metadata
from chalicelib.core.errors import errors_legacy
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.core.metrics import metrics
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC
from . import errors as errors_legacy

def _multiple_values(values, value_key="value"):

@@ -62,25 +62,6 @@ def get_batch(error_ids):
return errors_legacy.get_batch(error_ids=error_ids)

def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id", table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query

def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):

@@ -109,7 +90,7 @@ def __get_sort_key(key):
}.get(key, 'max_datetime')

def search(data: schemas.SearchErrorsSchema, project_id, user_id):
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)

@@ -117,7 +98,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
# ignore platform for errors table
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")

@@ -149,7 +130,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
if len(data.events) > errors_condition_count:
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
errors_only=True,
project_id=project_id, user_id=user_id,
project_id=project.project_id,
user_id=user_id,
issue=None,
favorite_only=False)
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"

@@ -251,7 +233,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
elif filter_type == schemas.FilterType.METADATA:
# get metadata list only if you need it
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = metadata.get(project_id=project.project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
if f.source in meta_keys.keys():
if is_any:

@@ -328,7 +310,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
**params,
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project_id,
"project_id": project.project_id,
"userId": user_id,
"step_size": step_size}
if data.limit is not None and data.page is not None:

@@ -356,14 +338,14 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
SELECT details.error_id as error_id,
name, message, users, total,
sessions, last_occurrence, first_occurrence, chart
FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
FROM (SELECT error_id,
JSONExtractString(toString(`$properties`), 'name') AS name,
JSONExtractString(toString(`$properties`), 'message') AS message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT events.session_id) AS sessions,
MAX(created_at) AS max_datetime,
MIN(created_at) AS min_datetime,
COUNT(DISTINCT JSONExtractString(toString(`$properties`), 'error_id'))
COUNT(DISTINCT error_id)
OVER() AS total
FROM {MAIN_EVENTS_TABLE} AS events
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id

@@ -375,7 +357,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
GROUP BY error_id, name, message
ORDER BY {sort} {order}
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
INNER JOIN (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
INNER JOIN (SELECT error_id,
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
FROM {MAIN_EVENTS_TABLE}

@@ -384,11 +366,14 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
toUnixTimestamp(toStartOfInterval(created_at, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
FROM (SELECT error_id,
gs.generate_series AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM {MAIN_EVENTS_TABLE}
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
WHERE {" AND ".join(ch_sub_query)}
AND created_at >= toDateTime(timestamp / 1000)
AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
GROUP BY error_id, timestamp
ORDER BY timestamp) AS sub_table
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""

@@ -398,16 +383,14 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
# print("------------")
query = ch.format(query=main_ch_query, parameters=params)

rows = ch.execute(query)
rows = ch.execute(query=query)
total = rows[0]["total"] if len(rows) > 0 else 0

for r in rows:
r["chart"] = list(r["chart"])
for i in range(len(r["chart"])):
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
r["chart"] = metrics.__complete_missing_steps(rows=r["chart"], start_time=data.startTimestamp,
end_time=data.endTimestamp,
density=data.density, neutral={"count": 0})

return {
'total': total,
'errors': helper.list_to_camel_case(rows)
@@ -1,5 +1,5 @@
from chalicelib.core.errors import errors_legacy as errors
from chalicelib.utils import errors_helper
from chalicelib.core.errors.modules import errors_helper

from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size

@@ -40,26 +40,29 @@ def __process_tags(row):

def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)

@@ -95,8 +98,7 @@ def get_details(project_id, error_id, user_id, **data):
device_partition,
country_partition,
chart24,
chart30,
custom_tags
chart30
FROM (SELECT error_id,
name,
message,

@@ -111,15 +113,8 @@ def get_details(project_id, error_id, user_id, **data):
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
INNER JOIN (SELECT session_id AS last_session_id
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
@@ -1,7 +1,8 @@
import json
from typing import Optional, List
from typing import List

import schemas
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.sessions import sessions_search
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.utils import pg_client, helper

@@ -51,27 +52,6 @@ def get_batch(error_ids):
return helper.list_to_camel_case(errors)

def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
chart: bool = False, step_size_name: str = "step_size",
project_key: Optional[str] = "project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query

def __get_sort_key(key):
return {
schemas.ErrorSort.OCCURRENCE: "max_datetime",

@@ -80,7 +60,7 @@ def __get_sort_key(key):
}.get(key, 'max_datetime')

def search(data: schemas.SearchErrorsSchema, project_id, user_id):
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
empty_response = {
'total': 0,
'errors': []

@@ -90,12 +70,13 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
project_key=None)
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")

@@ -107,7 +88,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
data.endTimestamp = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0:
print("-- searching for sessions before errors")
statuses = sessions_search.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response

@@ -125,7 +106,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
params = {
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project_id,
"project_id": project.project_id,
"userId": user_id,
"step_size": step_size}
if data.status != schemas.ErrorStatus.ALL:

@@ -207,7 +188,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
"""SELECT error_id
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"user_id": user_id})
cur.execute(query=query)
statuses = helper.list_to_camel_case(cur.fetchall())
@@ -3,8 +3,9 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
from chalicelib.core.sessions import sessions_ch as sessions
import chalicelib.core.sessions.sessions_ch as sessions
else:
from chalicelib.core.sessions import sessions
import chalicelib.core.sessions.sessions_pg as sessions
api/chalicelib/core/errors/modules/helper.py  (new file, 58 lines)
@@ -0,0 +1,58 @@
from typing import Optional

import schemas
from chalicelib.core.sourcemaps import sourcemaps

def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
chart: bool = False, step_size_name: str = "step_size",
project_key: Optional[str] = "project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query

def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query

def format_first_stack_frame(error):
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
for s in error["stack"]:
for c in s.get("context", []):
for sci, sc in enumerate(c):
if isinstance(sc, str) and len(sc) > 1000:
c[sci] = sc[:1000]
# convert bytes to string:
if isinstance(s["filename"], bytes):
s["filename"] = s["filename"].decode("utf-8")
return error
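A hypothetical usage sketch for these constraint builders: they return a list of SQL fragments with named placeholders, which callers extend and then join into a WHERE clause. The table name and parameter values below are illustrative only.

def build_where(constraints: list[str]) -> str:
    # Join the accumulated fragments exactly as the callers above do with " AND ".join(...)
    return " AND ".join(constraints)


constraints = ["project_id = %(project_id)s",
               "timestamp >= %(startDate)s",
               "timestamp < %(endDate)s"]
constraints.append("error_id = %(error_id)s")

query = f"""SELECT error_id, COUNT(1) AS occurrences
            FROM events.errors
            WHERE {build_where(constraints)}
            GROUP BY error_id;"""
params = {"project_id": 1, "startDate": 0, "endDate": 10 ** 13, "error_id": "abc"}
# Render the placeholders locally just to show the final shape of the statement
print(query % {k: repr(v) for k, v in params.items()})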
@@ -1,8 +1,9 @@
from functools import cache
from typing import Optional

import schemas
from chalicelib.core.autocomplete import autocomplete
from chalicelib.core import issues
from chalicelib.core.autocomplete import autocomplete
from chalicelib.core.sessions import sessions_metas
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC

@@ -137,52 +138,57 @@ class EventType:
column=None) # column=None because errors are searched by name or message

SUPPORTED_TYPES = {
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
query=autocomplete.__generic_query(
typename=EventType.LOCATION.ui_type)),
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
query=autocomplete.__generic_query(
typename=EventType.REQUEST.ui_type)),
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
query=autocomplete.__generic_query(
typename=EventType.GRAPHQL.ui_type)),
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
@cache
def supported_types():
return {
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
query=autocomplete.__generic_query(
typename=EventType.LOCATION.ui_type)),
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM.ui_type)),
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
query=autocomplete.__generic_query(
typename=EventType.STATEACTION.ui_type)),
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
query=None),
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
query=None),
# MOBILE
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CLICK_MOBILE.ui_type)),
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.SWIPE_MOBILE.ui_type)),
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.INPUT_MOBILE.ui_type)),
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
typename=EventType.REQUEST.ui_type)),
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
query=autocomplete.__generic_query(
typename=EventType.VIEW_MOBILE.ui_type)),
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM_MOBILE.ui_type)),
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.REQUEST_MOBILE.ui_type)),
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
typename=EventType.GRAPHQL.ui_type)),
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
query=autocomplete.__generic_query(
typename=EventType.STATEACTION.ui_type)),
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
query=None),
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
query=None),
}
# MOBILE
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CLICK_MOBILE.ui_type)),
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.SWIPE_MOBILE.ui_type)),
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.INPUT_MOBILE.ui_type)),
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.VIEW_MOBILE.ui_type)),
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM_MOBILE.ui_type)),
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.REQUEST_MOBILE.ui_type)),
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
query=None),
}

def get_errors_by_session_id(session_id, project_id):

@@ -202,20 +208,17 @@ def search(text, event_type, project_id, source, key):
if not event_type:
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}

if event_type in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
# for IOS events autocomplete
# if event_type + "_IOS" in SUPPORTED_TYPES.keys():
# rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,source=source)
elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
if event_type in supported_types().keys():
rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
elif event_type + "_MOBILE" in supported_types().keys():
rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
elif event_type in sessions_metas.supported_types().keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_IOS") \
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_MOBILE") \
and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
return sessions_metas.search(text, event_type, project_id)
else:
return {"errors": ["unsupported event"]}
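The hunks above turn the module-level SUPPORTED_TYPES dict into a cached supported_types() function, so the registry is built lazily, once, on first use. A minimal sketch of that pattern with functools.cache; the registry contents are placeholders, not the real event types.

from functools import cache


@cache
def supported_types() -> dict:
    print("building registry once")
    return {
        "CLICK": lambda project_id, value: f"click:{project_id}:{value}",
        "INPUT": lambda project_id, value: f"input:{project_id}:{value}",
    }


def search(event_type: str, project_id: int, text: str):
    registry = supported_types()  # cached after the first call
    if event_type in registry:
        return registry[event_type](project_id, text)
    return {"errors": ["unsupported event"]}


print(search("CLICK", 1, "buy"))
print(search("SWIPE", 1, "left"))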
@@ -27,7 +27,6 @@ HEALTH_ENDPOINTS = {
"http": app_connection_string("http-openreplay", 8888, "metrics"),
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
"peers": app_connection_string("peers-openreplay", 8888, "health"),
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
"sourcemapreader": app_connection_string(
"sourcemapreader-openreplay", 8888, "health"

@@ -39,9 +38,7 @@ HEALTH_ENDPOINTS = {
def __check_database_pg(*_):
fail_response = {
"health": False,
"details": {
"errors": ["Postgres health-check failed"]
}
"details": {"errors": ["Postgres health-check failed"]},
}
with pg_client.PostgresClient() as cur:
try:

@@ -63,29 +60,26 @@ def __check_database_pg(*_):
"details": {
# "version": server_version["server_version"],
# "schema": schema_version["version"]
}
},
}

def __always_healthy(*_):
return {
"health": True,
"details": {}
}
return {"health": True, "details": {}}

def __check_be_service(service_name):
def fn(*_):
fail_response = {
"health": False,
"details": {
"errors": ["server health-check failed"]
}
"details": {"errors": ["server health-check failed"]},
}
try:
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
if results.status_code != 200:
logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
logger.error(
f"!! issue with the {service_name}-health code:{results.status_code}"
)
logger.error(results.text)
# fail_response["details"]["errors"].append(results.text)
return fail_response

@@ -103,10 +97,7 @@ def __check_be_service(service_name):
logger.error("couldn't get response")
# fail_response["details"]["errors"].append(str(e))
return fail_response
return {
"health": True,
"details": {}
}
return {"health": True, "details": {}}

return fn

@@ -114,7 +105,7 @@ def __check_be_service(service_name):
def __check_redis(*_):
fail_response = {
"health": False,
"details": {"errors": ["server health-check failed"]}
"details": {"errors": ["server health-check failed"]},
}
if config("REDIS_STRING", default=None) is None:
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")

@@ -133,16 +124,14 @@ def __check_redis(*_):
"health": True,
"details": {
# "version": r.execute_command('INFO')['redis_version']
}
},
}

def __check_SSL(*_):
fail_response = {
"health": False,
"details": {
"errors": ["SSL Certificate health-check failed"]
}
"details": {"errors": ["SSL Certificate health-check failed"]},
}
try:
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)

@@ -150,36 +139,28 @@ def __check_SSL(*_):
logger.error("!! health failed: SSL Certificate")
logger.exception(e)
return fail_response
return {
"health": True,
"details": {}
}
return {"health": True, "details": {}}

def __get_sessions_stats(*_):
with pg_client.PostgresClient() as cur:
constraints = ["projects.deleted_at IS NULL"]
query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
query = cur.mogrify(
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
COALESCE(SUM(events_count),0) AS e_c
FROM public.projects_stats
INNER JOIN public.projects USING(project_id)
WHERE {" AND ".join(constraints)};""")
WHERE {" AND ".join(constraints)};"""
)
cur.execute(query)
row = cur.fetchone()
return {
"numberOfSessionsCaptured": row["s_c"],
"numberOfEventCaptured": row["e_c"]
}
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}

def get_health(tenant_id=None):
health_map = {
"databases": {
"postgres": __check_database_pg
},
"ingestionPipeline": {
"redis": __check_redis
},
"databases": {"postgres": __check_database_pg},
"ingestionPipeline": {"redis": __check_redis},
"backendServices": {
"alerts": __check_be_service("alerts"),
"assets": __check_be_service("assets"),

@@ -192,13 +173,12 @@ def get_health(tenant_id=None):
"http": __check_be_service("http"),
"ingress-nginx": __always_healthy,
"integrations": __check_be_service("integrations"),
"peers": __check_be_service("peers"),
"sink": __check_be_service("sink"),
"sourcemapreader": __check_be_service("sourcemapreader"),
"storage": __check_be_service("storage")
"storage": __check_be_service("storage"),
},
"details": __get_sessions_stats,
"ssl": __check_SSL
"ssl": __check_SSL,
}
return __process_health(health_map=health_map)

@@ -210,10 +190,16 @@ def __process_health(health_map):
response.pop(parent_key)
elif isinstance(health_map[parent_key], dict):
for element_key in health_map[parent_key]:
if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
if config(
f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
cast=bool,
default=False,
):
response[parent_key].pop(element_key)
else:
response[parent_key][element_key] = health_map[parent_key][element_key]()
response[parent_key][element_key] = health_map[parent_key][
element_key
]()
else:
response[parent_key] = health_map[parent_key]()
return response

@@ -221,7 +207,8 @@ def __process_health(health_map):

def cron():
with pg_client.PostgresClient() as cur:
query = cur.mogrify("""SELECT projects.project_id,
query = cur.mogrify(
"""SELECT projects.project_id,
projects.created_at,
projects.sessions_last_check_at,
projects.first_recorded_session_at,

@@ -229,7 +216,8 @@ def cron():
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
ORDER BY project_id;""")
ORDER BY project_id;"""
)
cur.execute(query)
rows = cur.fetchall()
for r in rows:

@@ -250,20 +238,24 @@ def cron():
count_start_from = r["last_update_at"]

count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
params = {"project_id": r["project_id"],
"start_ts": count_start_from,
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0}
params = {
"project_id": r["project_id"],
"start_ts": count_start_from,
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0,
}

query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
query = cur.mogrify(
"""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts>=%(start_ts)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
params)
params,
)
cur.execute(query)
row = cur.fetchone()
if row is not None:

@@ -271,56 +263,68 @@ def cron():
params["events_count"] = row["events_count"]

if insert:
query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
query = cur.mogrify(
"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
params)
params,
)
else:
query = cur.mogrify("""UPDATE public.projects_stats
query = cur.mogrify(
"""UPDATE public.projects_stats
SET sessions_count=sessions_count+%(sessions_count)s,
events_count=events_count+%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
params)
params,
)
cur.execute(query)

# this cron is used to correct the sessions&events count every week
def weekly_cron():
with pg_client.PostgresClient(long_query=True) as cur:
query = cur.mogrify("""SELECT project_id,
query = cur.mogrify(
"""SELECT project_id,
projects_stats.last_update_at
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
ORDER BY project_id;""")
ORDER BY project_id;"""
)
cur.execute(query)
rows = cur.fetchall()
for r in rows:
if r["last_update_at"] is None:
continue

params = {"project_id": r["project_id"],
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0}
params = {
"project_id": r["project_id"],
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0,
}

query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
query = cur.mogrify(
"""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
params)
params,
)
cur.execute(query)
row = cur.fetchone()
if row is not None:
params["sessions_count"] = row["sessions_count"]
params["events_count"] = row["events_count"]

query = cur.mogrify("""UPDATE public.projects_stats
query = cur.mogrify(
"""UPDATE public.projects_stats
SET sessions_count=%(sessions_count)s,
events_count=%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
params)
params,
)
cur.execute(query)
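Most of the reflowed queries in this file follow the same cur.mogrify(sql, params) pattern: named %(...)s placeholders are bound client-side and the rendered statement is then passed to execute(). A hedged standalone sketch with psycopg2 (which the Pipfile pins); the DSN and parameter values are placeholders.

import psycopg2

conn = psycopg2.connect("dbname=openreplay")  # placeholder DSN
with conn.cursor() as cur:
    params = {"project_id": 1, "start_ts": 0, "end_ts": 10 ** 13}
    # mogrify binds the named parameters and returns the final SQL (as bytes)
    query = cur.mogrify(
        """SELECT COUNT(1) AS sessions_count
           FROM public.sessions
           WHERE project_id = %(project_id)s
             AND start_ts >= %(start_ts)s
             AND start_ts <= %(end_ts)s;""",
        params,
    )
    cur.execute(query)
    print(cur.fetchone())
conn.close()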
@@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
cur.execute(
cur.mogrify(
"""SELECT username, token, url
FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
FROM public.jira_cloud
WHERE user_id = %(user_id)s;""",
{"user_id": self._user_id})
)
data = helper.dict_to_camel_case(cur.fetchone())

@@ -95,10 +95,9 @@ class JIRAIntegration(base.BaseIntegration):
def add(self, username, token, url, obfuscate=False):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.jira_cloud(username, token, user_id,url)
VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
RETURNING username, token, url;""",
cur.mogrify(""" \
INSERT INTO public.jira_cloud(username, token, user_id, url)
VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username,
"token": token, "url": url})
)

@@ -112,9 +111,10 @@ class JIRAIntegration(base.BaseIntegration):
def delete(self):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
DELETE FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
cur.mogrify(""" \
DELETE
FROM public.jira_cloud
WHERE user_id = %(user_id)s;""",
{"user_id": self._user_id})
)
return {"state": "success"}

@@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
changes={
"username": data.username,
"token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
else self.integration.token,
else self.integration["token"],
"url": str(data.url)
},
obfuscate=True
|||
|
|
@ -1,6 +1,5 @@
|
|||
from chalicelib.core import log_tools
|
||||
import requests
|
||||
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
IN_TY = "bugsnag"
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
import boto3
|
||||
from chalicelib.core import log_tools
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
IN_TY = "cloudwatch"
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from chalicelib.core import log_tools
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
IN_TY = "datadog"
|
||||
|
|
|
|||
|
|
@ -1,8 +1,7 @@
|
|||
import logging
|
||||
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from elasticsearch import Elasticsearch
|
||||
|
||||
from chalicelib.core import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from chalicelib.utils import pg_client, helper
|
||||
import json
|
||||
|
||||
from chalicelib.core.modules import TENANT_CONDITION
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
||||
EXCEPT = ["jira_server", "jira_cloud"]
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from chalicelib.core import log_tools
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
IN_TY = "newrelic"
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from chalicelib.core import log_tools
|
||||
from chalicelib.core.log_tools import log_tools
|
||||
from schemas import schemas
|
||||
|
||||
IN_TY = "rollbar"
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
import requests
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "sentry"
@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "stackdriver"
@@ -1,4 +1,4 @@
from chalicelib.core import log_tools
from chalicelib.core.log_tools import log_tools
from schemas import schemas

IN_TY = "sumologic"
@@ -98,17 +98,23 @@ def __edit(project_id, col_index, colname, new_name):
    if col_index not in list(old_metas.keys()):
        return {"errors": ["custom field not found"]}

    with pg_client.PostgresClient() as cur:
        if old_metas[col_index]["key"] != new_name:
    if old_metas[col_index]["key"] != new_name:
        with pg_client.PostgresClient() as cur:
            query = cur.mogrify(f"""UPDATE public.projects
                                    SET {colname} = %(value)s
                                    WHERE project_id = %(project_id)s
                                      AND deleted_at ISNULL
                                    RETURNING {colname};""",
                                    RETURNING {colname},
                                      (SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
                                {"project_id": project_id, "value": new_name})
            cur.execute(query=query)
            new_name = cur.fetchone()[colname]
            row = cur.fetchone()
            new_name = row[colname]
            old_name = row['old_' + colname]
            old_metas[col_index]["key"] = new_name
            projects.rename_metadata_condition(project_id=project_id,
                                               old_metadata_key=old_name,
                                               new_metadata_key=new_name)
    return {"data": old_metas[col_index]}
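The updated query above fetches both the new and the previous column value in a single round trip by pairing RETURNING with a scalar subquery on the same table; in PostgreSQL that subquery still sees the statement-start snapshot, i.e. the pre-UPDATE value. A hedged, self-contained illustration of the same idea (table and column names below are invented):

import psycopg2

conn = psycopg2.connect("dbname=test")        # assumed local database
with conn.cursor() as cur:
    cur.execute(
        """UPDATE demo_projects
           SET label = %(value)s
           WHERE project_id = %(project_id)s
           RETURNING label,
                     (SELECT label FROM demo_projects
                      WHERE project_id = %(project_id)s) AS old_label;""",
        {"project_id": 1, "value": "new-label"})
    new_label, old_label = cur.fetchone()      # old_label holds the pre-UPDATE value
conn.commit()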
@@ -121,8 +127,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
def delete(tenant_id, project_id, index: int):
    index = int(index)
    old_segments = get(project_id)
    old_segments = [k["index"] for k in old_segments]
    if index not in old_segments:
    old_indexes = [k["index"] for k in old_segments]
    if index not in old_indexes:
        return {"errors": ["custom field not found"]}

    with pg_client.PostgresClient() as cur:
@@ -132,7 +138,8 @@ def delete(tenant_id, project_id, index: int):
                             WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
                            {"project_id": project_id})
        cur.execute(query=query)

    projects.delete_metadata_condition(project_id=project_id,
                                       metadata_key=old_segments[old_indexes.index(index)]["key"])
    return {"data": get(project_id)}
@@ -6,10 +6,5 @@ logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental metrics")
    from chalicelib.core.metrics import heatmaps_ch as heatmaps
    from chalicelib.core.metrics import metrics_ch as metrics
    from chalicelib.core.metrics import product_analytics_ch as product_analytics
else:
    from chalicelib.core.metrics import heatmaps
    from chalicelib.core.metrics import metrics
    from chalicelib.core.metrics import product_analytics
    pass
@ -6,7 +6,7 @@ from fastapi import HTTPException, status
|
|||
import schemas
|
||||
from chalicelib.core import issues
|
||||
from chalicelib.core.errors import errors
|
||||
from chalicelib.core.metrics import heatmaps, product_analytics, funnels, custom_metrics_predefined
|
||||
from chalicelib.core.metrics import heatmaps, product_analytics, funnels
|
||||
from chalicelib.core.sessions import sessions, sessions_search
|
||||
from chalicelib.utils import helper, pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
|
@ -42,7 +42,7 @@ def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.Ca
|
|||
"total": 0,
|
||||
"errors": []
|
||||
}
|
||||
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
return errors.search(data.series[0].filter, project=project, user_id=user_id)
|
||||
|
||||
|
||||
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||
|
|
@ -52,11 +52,11 @@ def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.
|
|||
"total": 0,
|
||||
"sessions": []
|
||||
}
|
||||
return sessions_search.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
return sessions_search.search_sessions(data=data.series[0].filter, project=project, user_id=user_id)
|
||||
|
||||
|
||||
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
||||
include_mobs: bool = True):
|
||||
def get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
||||
include_mobs: bool = True):
|
||||
if len(data.series) == 0:
|
||||
return None
|
||||
data.series[0].filter.filters += data.series[0].filter.events
|
||||
|
|
@ -153,33 +153,28 @@ def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable,
|
|||
|
||||
|
||||
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
|
||||
if data.is_predefined:
|
||||
return custom_metrics_predefined.get_metric(key=data.metric_of,
|
||||
project_id=project.project_id,
|
||||
data=data.model_dump())
|
||||
|
||||
supported = {
|
||||
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
|
||||
schemas.MetricType.TABLE: __get_table_chart,
|
||||
schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
|
||||
schemas.MetricType.HEAT_MAP: get_heat_map_chart,
|
||||
schemas.MetricType.FUNNEL: __get_funnel_chart,
|
||||
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
|
||||
}
|
||||
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
|
||||
|
||||
|
||||
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||
def get_sessions_by_card_id(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
if not card_exists(metric_id=metric_id, project_id=project.project_id, user_id=user_id):
|
||||
return None
|
||||
results = []
|
||||
for s in data.series:
|
||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||
**sessions_search.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||
def get_sessions(project: schemas.ProjectContext, user_id, data: schemas.CardSessionsSchema):
|
||||
results = []
|
||||
if len(data.series) == 0:
|
||||
return results
|
||||
|
|
@ -189,14 +184,12 @@ def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
|||
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
||||
|
||||
results.append({"seriesId": None, "seriesName": s.name,
|
||||
**sessions_search.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
||||
if data.is_predefined:
|
||||
return not_supported()
|
||||
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
||||
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
||||
supported = {
|
||||
|
|
@ -208,12 +201,12 @@ def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.Card
|
|||
return supported.get(data.metric_type, not_supported)()
|
||||
|
||||
|
||||
def __get_global_card_info(data: schemas.CardSchema):
|
||||
def get_global_card_info(data: schemas.CardSchema):
|
||||
r = {"hideExcess": data.hide_excess, "compareTo": data.compare_to, "rows": data.rows}
|
||||
return r
|
||||
|
||||
|
||||
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
||||
def get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
||||
r = {"start_point": [s.model_dump() for s in data.start_point],
|
||||
"start_type": data.start_type,
|
||||
"excludes": [e.model_dump() for e in data.excludes],
|
||||
|
|
@ -228,8 +221,8 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
|
|||
if data.session_id is not None:
|
||||
session_data = {"sessionId": data.session_id}
|
||||
else:
|
||||
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
||||
data=data, include_mobs=False)
|
||||
session_data = get_heat_map_chart(project=project, user_id=user_id,
|
||||
data=data, include_mobs=False)
|
||||
if session_data is not None:
|
||||
session_data = {"sessionId": session_data["sessionId"]}
|
||||
|
||||
|
|
@ -242,9 +235,9 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
|
|||
series_len = len(data.series)
|
||||
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
||||
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
||||
params["card_info"] = __get_global_card_info(data=data)
|
||||
params["card_info"] = get_global_card_info(data=data)
|
||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||
params["card_info"] = {**params["card_info"], **__get_path_analysis_card_info(data=data)}
|
||||
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
|
||||
params["card_info"] = json.dumps(params["card_info"])
|
||||
|
||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||
|
|
@ -306,9 +299,9 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
|||
d_series_ids.append(i)
|
||||
params["d_series_ids"] = tuple(d_series_ids)
|
||||
params["session_data"] = json.dumps(metric["data"])
|
||||
params["card_info"] = __get_global_card_info(data=data)
|
||||
params["card_info"] = get_global_card_info(data=data)
|
||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||
params["card_info"] = {**params["card_info"], **__get_path_analysis_card_info(data=data)}
|
||||
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
|
||||
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if data.session_id is not None:
|
||||
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
||||
|
|
@ -359,6 +352,100 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
|||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
|
||||
constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
|
||||
params = {
|
||||
"project_id": project_id,
|
||||
"user_id": user_id,
|
||||
"offset": (data.page - 1) * data.limit,
|
||||
"limit": data.limit,
|
||||
}
|
||||
if data.mine_only:
|
||||
constraints.append("user_id = %(user_id)s")
|
||||
else:
|
||||
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
||||
if data.shared_only:
|
||||
constraints.append("is_public")
|
||||
|
||||
if data.filter is not None:
|
||||
if data.filter.type:
|
||||
constraints.append("metrics.metric_type = %(filter_type)s")
|
||||
params["filter_type"] = data.filter.type
|
||||
if data.filter.query and len(data.filter.query) > 0:
|
||||
constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
|
||||
params["filter_query"] = helper.values_for_operator(
|
||||
value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
|
||||
)
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
sub_join = ""
|
||||
if include_series:
|
||||
sub_join = """LEFT JOIN LATERAL (
|
||||
SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)"""
|
||||
|
||||
sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
|
||||
else "created_at"
|
||||
# change ascend to asc and descend to desc
|
||||
sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
|
||||
if sort_order == "ascend":
|
||||
sort_order = "asc"
|
||||
elif sort_order == "descend":
|
||||
sort_order = "desc"
|
||||
|
||||
query = cur.mogrify(
|
||||
f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
||||
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
||||
dashboards, owner_email, owner_name, default_config AS config, thumbnail
|
||||
FROM metrics
|
||||
{sub_join}
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name),'[]'::jsonb) AS dashboards
|
||||
FROM (
|
||||
SELECT DISTINCT dashboard_id, name, is_public
|
||||
FROM dashboards
|
||||
INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND dashboard_widgets.metric_id = metrics.metric_id
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||
) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT email AS owner_email, name AS owner_name
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE {" AND ".join(constraints)}
|
||||
ORDER BY {sort_column} {sort_order}
|
||||
LIMIT %(limit)s OFFSET %(offset)s;""",
|
||||
params
|
||||
)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
if len(rows) > 0:
|
||||
total = rows[0]["total"]
|
||||
if include_series:
|
||||
for r in rows:
|
||||
r.pop("total")
|
||||
for s in r.get("series", []):
|
||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||
else:
|
||||
for r in rows:
|
||||
r.pop("total")
|
||||
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
||||
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
else:
|
||||
total = 0
|
||||
|
||||
return {"total": total, "list": rows}
|
||||
|
||||
|
||||
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
||||
constraints = ["metrics.project_id = %(project_id)s",
|
||||
"metrics.deleted_at ISNULL"]
|
||||
|
|
@ -455,7 +542,7 @@ def __get_global_attributes(row):
|
|||
if row is None or row.get("cardInfo") is None:
|
||||
return row
|
||||
card_info = row.get("cardInfo", {})
|
||||
row["compareTo"] = card_info.get("compareTo", [])
|
||||
row["compareTo"] = card_info["compareTo"] if card_info.get("compareTo") is not None else []
|
||||
return row
|
||||
|
||||
|
||||
|
|
@ -598,11 +685,7 @@ def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, da
|
|||
raw_metric["density"] = data.density
|
||||
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
||||
|
||||
if metric.is_predefined:
|
||||
return custom_metrics_predefined.get_metric(key=metric.metric_of,
|
||||
project_id=project.project_id,
|
||||
data=data.model_dump())
|
||||
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if metric.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
||||
return heatmaps.get_selected_session(project_id=project.project_id,
|
||||
session_id=raw_metric["data"]["sessionId"])
|
||||
|
|
|
|||
|
|
@@ -1,25 +0,0 @@
import logging
from typing import Union

import schemas
from chalicelib.core.metrics import metrics

logger = logging.getLogger(__name__)


def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors], project_id: int, data: dict):
    supported = {
        schemas.MetricOfWebVitals.COUNT_SESSIONS: metrics.get_processed_sessions,
        schemas.MetricOfWebVitals.AVG_VISITED_PAGES: metrics.get_user_activity_avg_visited_pages,
        schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests,
        schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors,
        schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: metrics.get_domains_errors_4xx,
        schemas.MetricOfErrors.DOMAINS_ERRORS_5XX: metrics.get_domains_errors_5xx,
        schemas.MetricOfErrors.ERRORS_PER_DOMAINS: metrics.get_errors_per_domains,
        schemas.MetricOfErrors.ERRORS_PER_TYPE: metrics.get_errors_per_type,
        schemas.MetricOfErrors.RESOURCES_BY_PARTY: metrics.get_resources_by_party,
        schemas.MetricOfWebVitals.COUNT_USERS: metrics.get_unique_users,
        schemas.MetricOfWebVitals.SPEED_LOCATION: metrics.get_speed_index_location,
    }

    return supported.get(key, lambda *args: None)(project_id=project_id, **data)
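The deleted module above dispatched predefined metrics through a dict of handlers with a no-op fallback for unknown keys. As a general illustration of that dispatch-table pattern (all names below are invented):

from typing import Callable

def count_sessions(project_id: int, **kwargs) -> int:
    return 0   # placeholder handler

def count_users(project_id: int, **kwargs) -> int:
    return 0   # placeholder handler

HANDLERS: dict[str, Callable] = {
    "COUNT_SESSIONS": count_sessions,
    "COUNT_USERS": count_users,
}

def get_metric(key: str, project_id: int, data: dict):
    # unknown keys fall through to a lambda that swallows arguments and returns None
    return HANDLERS.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)

print(get_metric("COUNT_USERS", 1, {}))   # 0
print(get_metric("UNKNOWN", 1, {}))       # None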
11  api/chalicelib/core/metrics/heatmaps/__init__.py  Normal file
@@ -0,0 +1,11 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental heatmaps")
    from .heatmaps_ch import *
else:
    from .heatmaps import *
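The new package initializer above selects an implementation at import time and re-exports it with a wildcard import, so `from chalicelib.core.metrics import heatmaps` keeps working regardless of the flag. A minimal, hedged sketch of the same pattern for an invented package (`fast_impl` and `slow_impl` are placeholders):

# mypkg/__init__.py -- hypothetical package mirroring the pattern above
import logging

from decouple import config   # reads USE_FAST from the environment or a .env file

logger = logging.getLogger(__name__)

if config("USE_FAST", cast=bool, default=False):
    logger.info("using fast implementation")
    from .fast_impl import *     # noqa: F401,F403 -- re-export the module's public names
else:
    from .slow_impl import *     # noqa: F401,F403

Only names not starting with an underscore (or those listed in `__all__`) are re-exported by the wildcard, so both backends need to expose the same public interface.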
@@ -84,7 +84,7 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query)
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP 2 SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
@@ -122,7 +122,7 @@ def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatM
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query)
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
@@ -149,7 +149,7 @@ def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.Ge
    query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT main_events.selector AS selector,
        query = cur.format(query=f"""SELECT CAST(`$properties`.selector AS String) AS selector,
                                            COUNT(1) AS count
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)}
@@ -160,7 +160,7 @@ def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.Ge
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query)
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
@@ -221,7 +221,7 @@ def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, se
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            url = cur.execute(main_query)
            url = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION CH-----------")
            logger.warning(main_query.decode('UTF-8'))
@@ -295,7 +295,7 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(main_query)
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(main_query)
@@ -342,7 +342,7 @@ def get_selected_session(project_id, session_id):
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(main_query)
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP GET SELECTED SESSION QUERY EXCEPTION -----------")
            logger.warning(main_query.decode('UTF-8'))
@ -1,624 +0,0 @@
|
|||
import logging
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import metadata
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __get_constraints(project_id, time_constraint=True, chart=False, duration=True, project=True,
|
||||
project_identifier="project_id",
|
||||
main_table="sessions", time_column="start_ts", data={}):
|
||||
pg_sub_query = []
|
||||
main_table = main_table + "." if main_table is not None and len(main_table) > 0 else ""
|
||||
if project:
|
||||
pg_sub_query.append(f"{main_table}{project_identifier} =%({project_identifier})s")
|
||||
if duration:
|
||||
pg_sub_query.append(f"{main_table}duration>0")
|
||||
if time_constraint:
|
||||
pg_sub_query.append(f"{main_table}{time_column} >= %(startTimestamp)s")
|
||||
pg_sub_query.append(f"{main_table}{time_column} < %(endTimestamp)s")
|
||||
if chart:
|
||||
pg_sub_query.append(f"{main_table}{time_column} >= generated_timestamp")
|
||||
pg_sub_query.append(f"{main_table}{time_column} < generated_timestamp + %(step_size)s")
|
||||
return pg_sub_query + __get_meta_constraint(project_id=project_id, data=data)
|
||||
|
||||
|
||||
def __merge_charts(list1, list2, time_key="timestamp"):
|
||||
if len(list1) != len(list2):
|
||||
raise Exception("cannot merge unequal lists")
|
||||
result = []
|
||||
for i in range(len(list1)):
|
||||
timestamp = min(list1[i][time_key], list2[i][time_key])
|
||||
result.append({**list1[i], **list2[i], time_key: timestamp})
|
||||
return result
|
||||
|
||||
|
||||
def __get_constraint_values(data):
|
||||
params = {}
|
||||
for i, f in enumerate(data.get("filters", [])):
|
||||
params[f"{f['key']}_{i}"] = f["value"]
|
||||
return params
|
||||
|
||||
|
||||
def __get_meta_constraint(project_id, data):
|
||||
if len(data.get("filters", [])) == 0:
|
||||
return []
|
||||
constraints = []
|
||||
meta_keys = metadata.get(project_id=project_id)
|
||||
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
||||
|
||||
for i, f in enumerate(data.get("filters", [])):
|
||||
if f["key"] in meta_keys.keys():
|
||||
key = f"sessions.metadata_{meta_keys[f['key']]})"
|
||||
if f["value"] in ["*", ""]:
|
||||
constraints.append(f"{key} IS NOT NULL")
|
||||
else:
|
||||
constraints.append(f"{key} = %({f['key']}_{i})s")
|
||||
else:
|
||||
filter_type = f["key"].upper()
|
||||
filter_type = [filter_type, "USER" + filter_type, filter_type[4:]]
|
||||
if any(item in [schemas.FilterType.USER_BROWSER] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_browser = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_os = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_device = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_country = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_id = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.USER_ANONYMOUS_ID, schemas.FilterType.USER_ANONYMOUS_ID_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s")
|
||||
elif any(item in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE] \
|
||||
for item in filter_type):
|
||||
constraints.append(f"sessions.rev_id = %({f['key']}_{i})s")
|
||||
return constraints
|
||||
|
||||
|
||||
def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(),
|
||||
density=7, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=True, data=args)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(COUNT(sessions), 0) AS value
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
results = {
|
||||
"value": sum([r["value"] for r in rows]),
|
||||
"chart": rows
|
||||
}
|
||||
|
||||
diff = endTimestamp - startTimestamp
|
||||
endTimestamp = startTimestamp
|
||||
startTimestamp = endTimestamp - diff
|
||||
|
||||
pg_query = f"""SELECT COUNT(sessions.session_id) AS count
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query)};"""
|
||||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
|
||||
count = cur.fetchone()["count"]
|
||||
|
||||
results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
|
||||
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
||||
return results
|
||||
|
||||
|
||||
def __get_neutral(rows, add_All_if_empty=True):
|
||||
neutral = {l: 0 for l in [i for k in [list(v.keys()) for v in rows] for i in k]}
|
||||
if add_All_if_empty and len(neutral.keys()) <= 1:
|
||||
neutral = {"All": 0}
|
||||
return neutral
|
||||
|
||||
|
||||
def __merge_rows_with_neutral(rows, neutral):
|
||||
for i in range(len(rows)):
|
||||
rows[i] = {**neutral, **rows[i]}
|
||||
return rows
|
||||
|
||||
|
||||
def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
|
||||
data=args, main_table="requests", time_column="timestamp", project=False,
|
||||
duration=False)
|
||||
pg_sub_query_subset.append("requests.status_code/100 = %(status_code)s")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""WITH requests AS (SELECT host, timestamp
|
||||
FROM events_common.requests INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
)
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(JSONB_AGG(requests) FILTER ( WHERE requests IS NOT NULL ), '[]'::JSONB) AS keys
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT requests.host, COUNT(*) AS count
|
||||
FROM requests
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
GROUP BY host
|
||||
ORDER BY count DESC
|
||||
LIMIT 5
|
||||
) AS requests ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
params = {"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
"step_size": step_size,
|
||||
"status_code": status, **__get_constraint_values(args)}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
rows = __nested_array_to_dict_array(rows, key="host")
|
||||
neutral = __get_neutral(rows)
|
||||
rows = __merge_rows_with_neutral(rows, neutral)
|
||||
|
||||
return rows
|
||||
|
||||
|
||||
def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||
return __get_domains_errors_4xx_and_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
|
||||
endTimestamp=endTimestamp, density=density, **args)
|
||||
|
||||
|
||||
def get_domains_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||
return __get_domains_errors_4xx_and_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
|
||||
endTimestamp=endTimestamp, density=density, **args)
|
||||
|
||||
|
||||
def __nested_array_to_dict_array(rows, key="url_host", value="count"):
|
||||
for r in rows:
|
||||
for i in range(len(r["keys"])):
|
||||
r[r["keys"][i][key]] = r["keys"][i][value]
|
||||
r.pop("keys")
|
||||
return rows
|
||||
|
||||
|
||||
def get_errors_per_domains(project_id, limit, page, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), **args):
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query.append("requests.success = FALSE")
|
||||
params = {"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
"limit_s": (page - 1) * limit,
|
||||
"limit_e": page * limit,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT COALESCE(SUM(errors_count),0)::INT AS count,
|
||||
COUNT(raw.domain) AS total,
|
||||
jsonb_agg(raw) FILTER ( WHERE rn > %(limit_s)s
|
||||
AND rn <= %(limit_e)s ) AS values
|
||||
FROM (SELECT requests.host AS domain,
|
||||
COUNT(requests.session_id) AS errors_count,
|
||||
row_number() over (ORDER BY COUNT(requests.session_id) DESC ) AS rn
|
||||
FROM events_common.requests
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY requests.host
|
||||
ORDER BY errors_count DESC) AS raw;"""
|
||||
pg_query = cur.mogrify(pg_query, params)
|
||||
logger.debug("-----------")
|
||||
logger.debug(pg_query)
|
||||
logger.debug("-----------")
|
||||
cur.execute(pg_query)
|
||||
row = cur.fetchone()
|
||||
if row:
|
||||
row["values"] = row["values"] or []
|
||||
for r in row["values"]:
|
||||
r.pop("rn")
|
||||
|
||||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
|
||||
platform=None, density=7, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query_subset.append("requests.timestamp>=%(startTimestamp)s")
|
||||
pg_sub_query_subset.append("requests.timestamp<%(endTimestamp)s")
|
||||
pg_sub_query_subset.append("requests.status_code > 200")
|
||||
|
||||
pg_sub_query_subset_e = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
||||
time_constraint=False)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
||||
chart=True, data=args, main_table="", time_column="timestamp",
|
||||
project=False, duration=False)
|
||||
pg_sub_query_subset_e.append("timestamp>=%(startTimestamp)s")
|
||||
pg_sub_query_subset_e.append("timestamp<%(endTimestamp)s")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""WITH requests AS (SELECT status_code AS status, timestamp
|
||||
FROM events_common.requests
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
),
|
||||
errors_integ AS (SELECT timestamp
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
||||
AND source != 'js_exception'
|
||||
),
|
||||
errors_js AS (SELECT timestamp
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
||||
AND source = 'js_exception'
|
||||
)
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(SUM(CASE WHEN status / 100 = 4 THEN 1 ELSE 0 END), 0) AS _4xx,
|
||||
COALESCE(SUM(CASE WHEN status / 100 = 5 THEN 1 ELSE 0 END), 0) AS _5xx,
|
||||
COALESCE((SELECT COUNT(*)
|
||||
FROM errors_js
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
), 0) AS js,
|
||||
COALESCE((SELECT COUNT(*)
|
||||
FROM errors_integ
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
), 0) AS integrations
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (SELECT status
|
||||
FROM requests
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS errors_partition ON (TRUE)
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp;"""
|
||||
params = {"step_size": step_size,
|
||||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
return rows
|
||||
|
||||
|
||||
def get_impacted_sessions_by_js_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), density=7, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=True, data=args)
|
||||
pg_sub_query.append("m_errors.source = 'js_exception'")
|
||||
pg_sub_query.append("m_errors.project_id = %(project_id)s")
|
||||
pg_sub_query.append("errors.timestamp >= %(startTimestamp)s")
|
||||
pg_sub_query.append("errors.timestamp < %(endTimestamp)s")
|
||||
pg_sub_query_chart.append("m_errors.source = 'js_exception'")
|
||||
pg_sub_query_chart.append("m_errors.project_id = %(project_id)s")
|
||||
pg_sub_query_chart.append("errors.timestamp >= generated_timestamp")
|
||||
pg_sub_query_chart.append("errors.timestamp < generated_timestamp+ %(step_size)s")
|
||||
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
||||
time_constraint=False)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
||||
chart=True, data=args, main_table="errors", time_column="timestamp",
|
||||
project=False, duration=False)
|
||||
pg_sub_query_subset.append("m_errors.source = 'js_exception'")
|
||||
pg_sub_query_subset.append("errors.timestamp>=%(startTimestamp)s")
|
||||
pg_sub_query_subset.append("errors.timestamp<%(endTimestamp)s")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""WITH errors AS (SELECT DISTINCT ON (session_id,timestamp) session_id, timestamp
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
)
|
||||
SELECT *
|
||||
FROM (SELECT COUNT(DISTINCT session_id) AS sessions_count
|
||||
FROM errors) AS counts
|
||||
LEFT JOIN
|
||||
(SELECT jsonb_agg(chart) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(COUNT(session_id), 0) AS sessions_count
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT DISTINCT session_id
|
||||
FROM errors
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
||||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}))
|
||||
row_sessions = cur.fetchone()
|
||||
pg_query = f"""WITH errors AS ( SELECT DISTINCT ON(errors.error_id,timestamp) errors.error_id,timestamp
|
||||
FROM events.errors
|
||||
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
)
|
||||
SELECT *
|
||||
FROM (SELECT COUNT(DISTINCT errors.error_id) AS errors_count
|
||||
FROM errors) AS counts
|
||||
LEFT JOIN
|
||||
(SELECT jsonb_agg(chart) AS chart
|
||||
FROM (SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(COUNT(error_id), 0) AS errors_count
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT DISTINCT errors.error_id
|
||||
FROM errors
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS errors ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
||||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}))
|
||||
row_errors = cur.fetchone()
|
||||
chart = __merge_charts(row_sessions.pop("chart"), row_errors.pop("chart"))
|
||||
row_sessions = helper.dict_to_camel_case(row_sessions)
|
||||
row_errors = helper.dict_to_camel_case(row_errors)
|
||||
return {**row_sessions, **row_errors, "chart": chart}
|
||||
|
||||
|
||||
def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), density=7, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=False, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
||||
chart=True, data=args, main_table="requests", time_column="timestamp",
|
||||
duration=False)
|
||||
pg_sub_query_subset.append("requests.timestamp >= %(startTimestamp)s")
|
||||
pg_sub_query_subset.append("requests.timestamp < %(endTimestamp)s")
|
||||
# pg_sub_query_subset.append("resources.type IN ('fetch', 'script')")
|
||||
pg_sub_query_subset.append("requests.success = FALSE")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""WITH requests AS (
|
||||
SELECT requests.host, timestamp
|
||||
FROM events_common.requests
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
)
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
SUM(CASE WHEN first.host = sub_requests.host THEN 1 ELSE 0 END) AS first_party,
|
||||
SUM(CASE WHEN first.host != sub_requests.host THEN 1 ELSE 0 END) AS third_party
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN (
|
||||
SELECT requests.host,
|
||||
COUNT(requests.session_id) AS count
|
||||
FROM events_common.requests
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE sessions.project_id = '1'
|
||||
AND sessions.start_ts > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
|
||||
AND sessions.start_ts < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
|
||||
AND requests.timestamp > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
|
||||
AND requests.timestamp < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
|
||||
AND sessions.duration>0
|
||||
GROUP BY requests.host
|
||||
ORDER BY count DESC
|
||||
LIMIT 1
|
||||
) AS first ON (TRUE)
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT requests.host
|
||||
FROM requests
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sub_requests ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
||||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
|
||||
|
||||
rows = cur.fetchall()
|
||||
return rows
|
||||
|
||||
|
||||
def get_user_activity_avg_visited_pages(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), **args):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)
|
||||
results = helper.dict_to_camel_case(row)
|
||||
results["chart"] = __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp,
|
||||
endTimestamp, **args)
|
||||
|
||||
diff = endTimestamp - startTimestamp
|
||||
endTimestamp = startTimestamp
|
||||
startTimestamp = endTimestamp - diff
|
||||
row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)
|
||||
|
||||
previous = helper.dict_to_camel_case(row)
|
||||
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
|
||||
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
||||
return results
|
||||
|
||||
|
||||
def __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args):
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query.append("sessions.pages_count>0")
|
||||
pg_query = f"""SELECT COALESCE(CEIL(AVG(sessions.pages_count)),0) AS value
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query)};"""
|
||||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
row = cur.fetchone()
|
||||
return row
|
||||
|
||||
|
||||
def __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp, endTimestamp, density=20, **args):
|
||||
step_size = get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
|
||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp}
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=False, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
||||
chart=True, data=args, main_table="sessions", time_column="start_ts",
|
||||
duration=False)
|
||||
pg_sub_query_subset.append("sessions.duration IS NOT NULL")
|
||||
|
||||
pg_query = f"""WITH sessions AS(SELECT sessions.pages_count, sessions.start_ts
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
)
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(AVG(sessions.pages_count),0) AS value
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT sessions.pages_count
|
||||
FROM sessions
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
|
||||
rows = cur.fetchall()
|
||||
return rows
|
||||
|
||||
|
||||
def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), value=None, density=20, **args):
|
||||
step_size = get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
|
||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp}
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
|
||||
chart=True, data=args, main_table="pages", time_column="timestamp",
|
||||
duration=False)
|
||||
|
||||
if value is not None:
|
||||
pg_sub_query.append("pages.path = %(value)s")
|
||||
pg_sub_query_chart.append("pages.path = %(value)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT COUNT(pages.session_id) AS value
|
||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)};"""
|
||||
cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
"value": value, **__get_constraint_values(args)}))
|
||||
row = cur.fetchone()
|
||||
pg_query = f"""WITH pages AS(SELECT pages.timestamp
|
||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
)
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
COUNT(pages.*) AS value
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT 1
|
||||
FROM pages
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS pages ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
|
||||
rows = cur.fetchall()
|
||||
row["chart"] = rows
|
||||
row["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
||||
return helper.dict_to_camel_case(row)
|
||||
|
||||
|
||||
def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(),
|
||||
density=7, **args):
|
||||
step_size = get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=True, data=args)
|
||||
pg_sub_query.append("user_id IS NOT NULL")
|
||||
pg_sub_query.append("user_id != ''")
|
||||
pg_sub_query_chart.append("user_id IS NOT NULL")
|
||||
pg_sub_query_chart.append("user_id != ''")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT generated_timestamp AS timestamp,
|
||||
COALESCE(COUNT(sessions), 0) AS value
|
||||
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT DISTINCT user_id
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||
) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;"""
|
||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
results = {
|
||||
"value": sum([r["value"] for r in rows]),
|
||||
"chart": rows
|
||||
}
|
||||
|
||||
diff = endTimestamp - startTimestamp
|
||||
endTimestamp = startTimestamp
|
||||
startTimestamp = endTimestamp - diff
|
||||
|
||||
pg_query = f"""SELECT COUNT(DISTINCT sessions.user_id) AS count
|
||||
FROM public.sessions
|
||||
WHERE {" AND ".join(pg_sub_query)};"""
|
||||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
|
||||
count = cur.fetchone()["count"]
|
||||
|
||||
results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
|
||||
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
||||
return results
|
||||
|
||||
|
||||
def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||
endTimestamp=TimeUTC.now(), **args):
|
||||
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||
pg_sub_query.append("pages.speed_index IS NOT NULL")
|
||||
pg_sub_query.append("pages.speed_index>0")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS value
|
||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
GROUP BY sessions.user_country
|
||||
ORDER BY value, sessions.user_country;"""
|
||||
params = {"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
rows = cur.fetchall()
|
||||
if len(rows) > 0:
|
||||
pg_query = f"""SELECT AVG(pages.speed_index) AS avg
|
||||
FROM events.pages INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)};"""
|
||||
cur.execute(cur.mogrify(pg_query, params))
|
||||
avg = cur.fetchone()["avg"]
|
||||
else:
|
||||
avg = 0
|
||||
return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.MILLISECOND}
|
||||
|
|
@ -1,629 +0,0 @@
|
|||
import logging
|
||||
from math import isnan
|
||||
|
||||
import schemas
|
||||
from chalicelib.utils import ch_client
|
||||
from chalicelib.utils import exp_ch_helper
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __get_basic_constraints(table_name=None, time_constraint=True, round_start=False, data={}, identifier="project_id"):
|
||||
if table_name:
|
||||
table_name += "."
|
||||
else:
|
||||
table_name = ""
|
||||
ch_sub_query = [f"{table_name}{identifier} =toUInt16(%({identifier})s)"]
|
||||
if time_constraint:
|
||||
if round_start:
|
||||
ch_sub_query.append(
|
||||
f"toStartOfInterval({table_name}datetime, INTERVAL %(step_size)s second) >= toDateTime(%(startTimestamp)s/1000)")
|
||||
else:
|
||||
ch_sub_query.append(f"{table_name}datetime >= toDateTime(%(startTimestamp)s/1000)")
|
||||
ch_sub_query.append(f"{table_name}datetime < toDateTime(%(endTimestamp)s/1000)")
|
||||
return ch_sub_query + __get_generic_constraint(data=data, table_name=table_name)
|
||||
|
||||
|
||||
def __get_basic_constraints_events(table_name=None, time_constraint=True, round_start=False, data={}, identifier="project_id"):
|
||||
if table_name:
|
||||
table_name += "."
|
||||
else:
|
||||
table_name = ""
|
||||
ch_sub_query = [f"{table_name}{identifier} =toUInt16(%({identifier})s)"]
|
||||
if time_constraint:
|
||||
if round_start:
|
||||
ch_sub_query.append(
|
||||
f"toStartOfInterval({table_name}created_at, INTERVAL %(step_size)s second) >= toDateTime(%(startTimestamp)s/1000)")
|
||||
else:
|
||||
ch_sub_query.append(f"{table_name}created_at >= toDateTime(%(startTimestamp)s/1000)")
|
||||
ch_sub_query.append(f"{table_name}created_at < toDateTime(%(endTimestamp)s/1000)")
|
||||
return ch_sub_query + __get_generic_constraint(data=data, table_name=table_name)
|
||||
|
||||
|
||||
def __frange(start, stop, step):
|
||||
result = []
|
||||
i = start
|
||||
while i < stop:
|
||||
result.append(i)
|
||||
i += step
|
||||
return result
|
||||
|
||||
|
||||
def __add_missing_keys(original, complete):
|
||||
for missing in [key for key in complete.keys() if key not in original.keys()]:
|
||||
original[missing] = complete[missing]
|
||||
return original
|
||||
|
||||
|
||||
def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_key="timestamp", time_coefficient=1000):
|
||||
if len(rows) == density:
|
||||
return rows
|
||||
step = get_step_size(start_time, end_time, density, decimal=True)
|
||||
optimal = [(int(i * time_coefficient), int((i + step) * time_coefficient)) for i in
|
||||
__frange(start_time // time_coefficient, end_time // time_coefficient, step)]
|
||||
result = []
|
||||
r = 0
|
||||
o = 0
|
||||
for i in range(density):
|
||||
neutral_clone = dict(neutral)
|
||||
for k in neutral_clone.keys():
|
||||
if callable(neutral_clone[k]):
|
||||
neutral_clone[k] = neutral_clone[k]()
|
||||
if r < len(rows) and len(result) + len(rows) - r == density:
|
||||
result += rows[r:]
|
||||
break
|
||||
if r < len(rows) and o < len(optimal) and rows[r][time_key] < optimal[o][0]:
|
||||
# complete missing keys in original object
|
||||
rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
|
||||
result.append(rows[r])
|
||||
r += 1
|
||||
elif r < len(rows) and o < len(optimal) and optimal[o][0] <= rows[r][time_key] < optimal[o][1]:
|
||||
# complete missing keys in original object
|
||||
rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
|
||||
result.append(rows[r])
|
||||
r += 1
|
||||
o += 1
|
||||
else:
|
||||
neutral_clone[time_key] = optimal[o][0]
|
||||
result.append(neutral_clone)
|
||||
o += 1
|
||||
# elif r < len(rows) and rows[r][time_key] >= optimal[o][1]:
|
||||
# neutral_clone[time_key] = optimal[o][0]
|
||||
# result.append(neutral_clone)
|
||||
# o += 1
|
||||
# else:
|
||||
# neutral_clone[time_key] = optimal[o][0]
|
||||
# result.append(neutral_clone)
|
||||
# o += 1
|
||||
return result
|
||||
|
||||
|
||||
def __get_constraint(data, fields, table_name):
|
||||
constraints = []
|
||||
# for k in fields.keys():
|
||||
for i, f in enumerate(data.get("filters", [])):
|
||||
if f["key"] in fields.keys():
|
||||
if f["value"] in ["*", ""]:
|
||||
constraints.append(f"isNotNull({table_name}{fields[f['key']]})")
|
||||
else:
|
||||
constraints.append(f"{table_name}{fields[f['key']]} = %({f['key']}_{i})s")
|
||||
# TODO: remove this in next release
|
||||
offset = len(data.get("filters", []))
|
||||
for i, f in enumerate(data.keys()):
|
||||
if f in fields.keys():
|
||||
if data[f] in ["*", ""]:
|
||||
constraints.append(f"isNotNull({table_name}{fields[f]})")
|
||||
else:
|
||||
constraints.append(f"{table_name}{fields[f]} = %({f}_{i + offset})s")
|
||||
return constraints
|
||||
|
||||
|
||||
def __get_constraint_values(data):
|
||||
params = {}
|
||||
for i, f in enumerate(data.get("filters", [])):
|
||||
params[f"{f['key']}_{i}"] = f["value"]
|
||||
|
||||
# TODO: remove this in next release
|
||||
offset = len(data.get("filters", []))
|
||||
for i, f in enumerate(data.keys()):
|
||||
params[f"{f}_{i + offset}"] = data[f]
|
||||
return params
|
||||
|
||||
|
||||
METADATA_FIELDS = {"userId": "user_id",
|
||||
"userAnonymousId": "user_anonymous_id",
|
||||
"metadata1": "metadata_1",
|
||||
"metadata2": "metadata_2",
|
||||
"metadata3": "metadata_3",
|
||||
"metadata4": "metadata_4",
|
||||
"metadata5": "metadata_5",
|
||||
"metadata6": "metadata_6",
|
||||
"metadata7": "metadata_7",
|
||||
"metadata8": "metadata_8",
|
||||
"metadata9": "metadata_9",
|
||||
"metadata10": "metadata_10"}
|
||||
|
||||
|
||||
def __get_meta_constraint(data):
|
||||
return __get_constraint(data=data, fields=METADATA_FIELDS, table_name="sessions_metadata.")
|
||||
|
||||
|
||||
SESSIONS_META_FIELDS = {"revId": "rev_id",
|
||||
"country": "user_country",
|
||||
"os": "user_os",
|
||||
"platform": "user_device_type",
|
||||
"device": "user_device",
|
||||
"browser": "user_browser"}
|
||||
|
||||
|
||||
def __get_generic_constraint(data, table_name):
|
||||
return __get_constraint(data=data, fields=SESSIONS_META_FIELDS, table_name=table_name)
|
||||
|
||||
|
||||
def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(),
                           density=7, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query = __get_basic_constraints(table_name="sessions", data=args)
    ch_sub_query_chart = __get_basic_constraints(table_name="sessions", round_start=True, data=args)
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition
    ch_sub_query_chart += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""\
                SELECT toUnixTimestamp(toStartOfInterval(sessions.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                       COUNT(DISTINCT sessions.session_id) AS value
                FROM {exp_ch_helper.get_main_sessions_table(startTimestamp)} AS sessions
                WHERE {" AND ".join(ch_sub_query_chart)}
                GROUP BY timestamp
                ORDER BY timestamp;"""
        params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}

        rows = ch.execute(query=ch_query, parameters=params)

        results = {
            "value": sum([r["value"] for r in rows]),
            "chart": __complete_missing_steps(rows=rows, start_time=startTimestamp, end_time=endTimestamp,
                                              density=density,
                                              neutral={"value": 0})
        }

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff

        ch_query = f"""SELECT COUNT(1) AS count
                       FROM {exp_ch_helper.get_main_sessions_table(startTimestamp)} AS sessions
                       WHERE {" AND ".join(ch_sub_query)};"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
                  **__get_constraint_values(args)}

        count = ch.execute(query=ch_query, parameters=params)

        count = count[0]["count"]

    results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results

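# Note (illustrative, not part of the original change): the "progress" value above compares
# the selected window against the immediately preceding window of the same length.
# For example, with startTimestamp = T-24h and endTimestamp = T:
#   diff = 24h, so the previous window becomes [T-48h, T-24h),
# and helper.__progress(old_val=<previous count>, new_val=<current count>) turns the two
# totals into the relative change shown on the dashboard card.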
def __get_domains_errors_neutral(rows):
    neutral = {l: 0 for l in [i for k in [list(v.keys()) for v in rows] for i in k]}
    if len(neutral.keys()) == 0:
        neutral = {"All": 0}
    return neutral


def __merge_rows_with_neutral(rows, neutral):
    for i in range(len(rows)):
        rows[i] = {**neutral, **rows[i]}
    return rows

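# Illustrative sketch (not part of the original change): the neutral dict collects every key
# seen in any row (including "timestamp"), and __merge_rows_with_neutral back-fills rows that
# miss some domains, e.g.:
#
#   rows = [{"timestamp": 1, "a.com": 2}, {"timestamp": 2, "b.com": 1}]
#   neutral = __get_domains_errors_neutral(rows)   # {"timestamp": 0, "a.com": 0, "b.com": 0}
#   __merge_rows_with_neutral(rows, neutral)
#   # -> [{"timestamp": 1, "a.com": 2, "b.com": 0},
#   #     {"timestamp": 2, "a.com": 0, "b.com": 1}]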


def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                     endTimestamp=TimeUTC.now(), density=6, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query = __get_basic_constraints(table_name="requests", round_start=True, data=args)
    ch_sub_query.append("requests.event_type='REQUEST'")
    ch_sub_query.append("intDiv(requests.status, 100) == %(status_code)s")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT timestamp,
                              groupArray([domain, toString(count)]) AS keys
                       FROM (SELECT toUnixTimestamp(toStartOfInterval(requests.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                                    requests.url_host AS domain, COUNT(1) AS count
                             FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS requests
                             WHERE {" AND ".join(ch_sub_query)}
                             GROUP BY timestamp, requests.url_host
                             ORDER BY timestamp, count DESC
                             LIMIT 5 BY timestamp) AS domain_stats
                       GROUP BY timestamp;"""
        params = {"project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp,
                  "step_size": step_size,
                  "status_code": status, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
        rows = __nested_array_to_dict_array(rows)
        neutral = __get_domains_errors_neutral(rows)
        rows = __merge_rows_with_neutral(rows, neutral)

        return __complete_missing_steps(rows=rows, start_time=startTimestamp,
                                        end_time=endTimestamp,
                                        density=density, neutral=neutral)


def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=6, **args):
    return __get_domains_errors_4xx_and_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
                                            endTimestamp=endTimestamp, density=density, **args)


def get_domains_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=6, **args):
    return __get_domains_errors_4xx_and_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
                                            endTimestamp=endTimestamp, density=density, **args)

def __nested_array_to_dict_array(rows):
    for r in rows:
        for i in range(len(r["keys"])):
            r[r["keys"][i][0]] = int(r["keys"][i][1])
        r.pop("keys")
    return rows

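# Illustrative sketch (not part of the original change): rows come back from ClickHouse with
# the groupArray([domain, toString(count)]) pairs under "keys"; the helper above flattens
# them into per-domain columns, e.g.:
#
#   rows = [{"timestamp": 1712000000000, "keys": [["example.com", "7"], ["cdn.example.com", "3"]]}]
#   __nested_array_to_dict_array(rows)
#   # -> [{"timestamp": 1712000000000, "example.com": 7, "cdn.example.com": 3}]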


def get_errors_per_domains(project_id, limit, page, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), **args):
    ch_sub_query = __get_basic_constraints(table_name="requests", data=args)
    ch_sub_query.append("requests.event_type = 'REQUEST'")
    ch_sub_query.append("requests.success = 0")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition
    params = {"project_id": project_id,
              "startTimestamp": startTimestamp,
              "endTimestamp": endTimestamp,
              **__get_constraint_values(args),
              "limit_s": (page - 1) * limit,
              "limit": limit}
    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT requests.url_host AS domain,
                              COUNT(1) AS errors_count,
                              COUNT(1) OVER () AS total,
                              SUM(errors_count) OVER () AS count
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS requests
                       WHERE {" AND ".join(ch_sub_query)}
                       GROUP BY requests.url_host
                       ORDER BY errors_count DESC
                       LIMIT %(limit)s OFFSET %(limit_s)s;"""
        logger.debug("-----------")
        logger.debug(ch.format(query=ch_query, parameters=params))
        logger.debug("-----------")
        rows = ch.execute(query=ch_query, parameters=params)
        response = {"count": 0, "total": 0, "values": []}
        if len(rows) > 0:
            response["count"] = rows[0]["count"]
            response["total"] = rows[0]["total"]
            rows = helper.list_to_camel_case(rows)
            for r in rows:
                r.pop("count")
                r.pop("total")

    return response

def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
                        platform=None, density=7, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query_chart = __get_basic_constraints(table_name="events", round_start=True,
                                                 data=args)
    ch_sub_query_chart.append("(events.event_type = 'REQUEST' OR events.event_type = 'ERROR')")
    ch_sub_query_chart.append("(events.status>200 OR events.event_type = 'ERROR')")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query_chart += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                              SUM(events.event_type = 'REQUEST' AND intDiv(events.status, 100) == 4) AS _4xx,
                              SUM(events.event_type = 'REQUEST' AND intDiv(events.status, 100) == 5) AS _5xx,
                              SUM(events.event_type = 'ERROR' AND events.source == 'js_exception') AS js,
                              SUM(events.event_type = 'ERROR' AND events.source != 'js_exception') AS integrations
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS events
                       WHERE {" AND ".join(ch_sub_query_chart)}
                       GROUP BY timestamp
                       ORDER BY timestamp;"""
        params = {"step_size": step_size,
                  "project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
        rows = helper.list_to_camel_case(rows)

    return __complete_missing_steps(rows=rows, start_time=startTimestamp,
                                    end_time=endTimestamp,
                                    density=density,
                                    neutral={"4xx": 0, "5xx": 0, "js": 0, "integrations": 0})

def get_impacted_sessions_by_js_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                       endTimestamp=TimeUTC.now(), density=7, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query_chart = __get_basic_constraints(table_name="errors", round_start=True, data=args)
    ch_sub_query_chart.append("errors.event_type='ERROR'")
    ch_sub_query_chart.append("errors.source == 'js_exception'")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query_chart += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT toUnixTimestamp(toStartOfInterval(errors.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                              COUNT(DISTINCT errors.session_id) AS sessions_count,
                              COUNT(DISTINCT errors.error_id) AS errors_count
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS errors
                       WHERE {" AND ".join(ch_sub_query_chart)}
                       GROUP BY timestamp
                       ORDER BY timestamp;"""
        rows = ch.execute(query=ch_query,
                          parameters={"step_size": step_size,
                                      "project_id": project_id,
                                      "startTimestamp": startTimestamp,
                                      "endTimestamp": endTimestamp, **__get_constraint_values(args)})
        ch_query = f"""SELECT COUNT(DISTINCT errors.session_id) AS sessions_count,
                              COUNT(DISTINCT errors.error_id) AS errors_count
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS errors
                       WHERE {" AND ".join(ch_sub_query_chart)};"""
        counts = ch.execute(query=ch_query,
                            parameters={"step_size": step_size,
                                        "project_id": project_id,
                                        "startTimestamp": startTimestamp,
                                        "endTimestamp": endTimestamp, **__get_constraint_values(args)})
    return {"sessionsCount": counts[0]["sessions_count"],
            "errorsCount": counts[0]["errors_count"],
            "chart": helper.list_to_camel_case(__complete_missing_steps(rows=rows, start_time=startTimestamp,
                                                                        end_time=endTimestamp,
                                                                        density=density,
                                                                        neutral={"sessions_count": 0,
                                                                                 "errors_count": 0}))}

def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=7, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query = __get_basic_constraints(table_name="requests", round_start=True, data=args)
    ch_sub_query.append("requests.event_type='REQUEST'")
    ch_sub_query.append("requests.success = 0")
    sch_sub_query = ["rs.project_id =toUInt16(%(project_id)s)", "rs.event_type='REQUEST'"]
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition
    # sch_sub_query += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT toUnixTimestamp(toStartOfInterval(sub_requests.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                              SUM(first.url_host = sub_requests.url_host) AS first_party,
                              SUM(first.url_host != sub_requests.url_host) AS third_party
                       FROM
                            (
                                SELECT requests.datetime, requests.url_host
                                FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS requests
                                WHERE {" AND ".join(ch_sub_query)}
                            ) AS sub_requests
                            CROSS JOIN
                            (
                                SELECT rs.url_host,
                                       COUNT(1) AS count
                                FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS rs
                                WHERE {" AND ".join(sch_sub_query)}
                                GROUP BY rs.url_host
                                ORDER BY count DESC
                                LIMIT 1
                            ) AS first
                       GROUP BY timestamp
                       ORDER BY timestamp;"""
        params = {"step_size": step_size,
                  "project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
    return helper.list_to_camel_case(__complete_missing_steps(rows=rows, start_time=startTimestamp,
                                                              end_time=endTimestamp,
                                                              density=density,
                                                              neutral={"first_party": 0,
                                                                       "third_party": 0}))

def get_user_activity_avg_visited_pages(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                        endTimestamp=TimeUTC.now(), **args):
    results = {}

    with ch_client.ClickHouseClient() as ch:
        rows = __get_user_activity_avg_visited_pages(ch, project_id, startTimestamp, endTimestamp, **args)
        if len(rows) > 0:
            results = helper.dict_to_camel_case(rows[0])
            for key in results:
                if isnan(results[key]):
                    results[key] = 0
        results["chart"] = __get_user_activity_avg_visited_pages_chart(ch, project_id, startTimestamp,
                                                                       endTimestamp, **args)

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff
        rows = __get_user_activity_avg_visited_pages(ch, project_id, startTimestamp, endTimestamp, **args)

        if len(rows) > 0:
            previous = helper.dict_to_camel_case(rows[0])
            results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results

def __get_user_activity_avg_visited_pages(ch, project_id, startTimestamp, endTimestamp, **args):
    ch_sub_query = __get_basic_constraints(table_name="pages", data=args)
    ch_sub_query.append("pages.event_type='LOCATION'")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition

    ch_query = f"""SELECT COALESCE(CEIL(avgOrNull(count)),0) AS value
                   FROM (SELECT COUNT(1) AS count
                         FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                         WHERE {" AND ".join(ch_sub_query)}
                         GROUP BY session_id) AS groupped_data
                   WHERE count>0;"""
    params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
              **__get_constraint_values(args)}

    rows = ch.execute(query=ch_query, parameters=params)

    return rows

def __get_user_activity_avg_visited_pages_chart(ch, project_id, startTimestamp, endTimestamp, density=20, **args):
    step_size = get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density)
    ch_sub_query_chart = __get_basic_constraints(table_name="pages", round_start=True, data=args)
    ch_sub_query_chart.append("pages.event_type='LOCATION'")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query_chart += meta_condition

    params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
              "endTimestamp": endTimestamp, **__get_constraint_values(args)}
    ch_query = f"""SELECT timestamp, COALESCE(avgOrNull(count), 0) AS value
                   FROM (SELECT toUnixTimestamp(toStartOfInterval(pages.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                                session_id, COUNT(1) AS count
                         FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                         WHERE {" AND ".join(ch_sub_query_chart)}
                         GROUP BY timestamp, session_id
                         ORDER BY timestamp) AS groupped_data
                   WHERE count>0
                   GROUP BY timestamp
                   ORDER BY timestamp;"""
    rows = ch.execute(query=ch_query, parameters=params)
    rows = __complete_missing_steps(rows=rows, start_time=startTimestamp,
                                    end_time=endTimestamp,
                                    density=density, neutral={"value": 0})
    return rows

def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                   endTimestamp=TimeUTC.now(), value=None, density=20, **args):
    step_size = get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density)
    ch_sub_query_chart = __get_basic_constraints(table_name="pages", round_start=True, data=args)
    ch_sub_query_chart.append("pages.event_type='LOCATION'")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query_chart += meta_condition
    ch_sub_query = __get_basic_constraints(table_name="pages", data=args)
    ch_sub_query.append("pages.event_type='LOCATION'")
    ch_sub_query += meta_condition

    if value is not None:
        ch_sub_query.append("pages.url_path = %(value)s")
        ch_sub_query_chart.append("pages.url_path = %(value)s")
    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT COUNT(1) AS value
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                       WHERE {" AND ".join(ch_sub_query)};"""
        params = {"step_size": step_size, "project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp,
                  "value": value, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
        result = rows[0]
        ch_query = f"""SELECT toUnixTimestamp(toStartOfInterval(pages.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                              COUNT(1) AS value
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                       WHERE {" AND ".join(ch_sub_query_chart)}
                       GROUP BY timestamp
                       ORDER BY timestamp;"""
        params = {**params, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
        rows = __complete_missing_steps(rows=rows, start_time=startTimestamp,
                                        end_time=endTimestamp,
                                        density=density, neutral={"value": 0})
        result["chart"] = rows
    result["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return helper.dict_to_camel_case(result)

def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                     endTimestamp=TimeUTC.now(),
                     density=7, **args):
    step_size = get_step_size(startTimestamp, endTimestamp, density)
    ch_sub_query = __get_basic_constraints(table_name="sessions", data=args)
    ch_sub_query_chart = __get_basic_constraints(table_name="sessions", round_start=True, data=args)
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition
    ch_sub_query_chart += meta_condition
    ch_sub_query_chart.append("isNotNull(sessions.user_id)")
    ch_sub_query_chart.append("sessions.user_id!=''")
    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""\
                SELECT toUnixTimestamp(toStartOfInterval(sessions.datetime, INTERVAL %(step_size)s second)) * 1000 AS timestamp,
                       COUNT(DISTINCT sessions.user_id) AS value
                FROM {exp_ch_helper.get_main_sessions_table(startTimestamp)} AS sessions
                WHERE {" AND ".join(ch_sub_query_chart)}
                GROUP BY timestamp
                ORDER BY timestamp;"""
        params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}

        rows = ch.execute(query=ch_query, parameters=params)

        results = {
            "value": sum([r["value"] for r in rows]),
            "chart": __complete_missing_steps(rows=rows, start_time=startTimestamp, end_time=endTimestamp,
                                              density=density,
                                              neutral={"value": 0})
        }

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff

        ch_query = f"""SELECT COUNT(DISTINCT user_id) AS count
                       FROM {exp_ch_helper.get_main_sessions_table(startTimestamp)} AS sessions
                       WHERE {" AND ".join(ch_sub_query)};"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
                  **__get_constraint_values(args)}

        count = ch.execute(query=ch_query, parameters=params)

        count = count[0]["count"]

    results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results



def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                             endTimestamp=TimeUTC.now(), **args):
    ch_sub_query = __get_basic_constraints(table_name="pages", data=args)
    ch_sub_query.append("pages.event_type='LOCATION'")
    ch_sub_query.append("isNotNull(pages.speed_index)")
    ch_sub_query.append("pages.speed_index>0")
    meta_condition = __get_meta_constraint(args)
    ch_sub_query += meta_condition

    with ch_client.ClickHouseClient() as ch:
        ch_query = f"""SELECT sessions.user_country, COALESCE(avgOrNull(pages.speed_index),0) AS value
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                            INNER JOIN {exp_ch_helper.get_main_sessions_table(startTimestamp)} AS sessions USING (session_id)
                       WHERE {" AND ".join(ch_sub_query)}
                       GROUP BY sessions.user_country
                       ORDER BY value, sessions.user_country;"""
        params = {"project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        rows = ch.execute(query=ch_query, parameters=params)
        ch_query = f"""SELECT COALESCE(avgOrNull(pages.speed_index),0) AS avg
                       FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
                       WHERE {" AND ".join(ch_sub_query)};"""
        avg = ch.execute(query=ch_query, parameters=params)[0]["avg"] if len(rows) > 0 else 0
    return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.MILLISECOND}


@@ -5,8 +5,8 @@ from decouple import config
logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
-    from chalicelib.core.sessions import sessions_ch as sessions
+    import chalicelib.core.sessions.sessions_ch as sessions
else:
-    from chalicelib.core.sessions import sessions
+    import chalicelib.core.sessions.sessions_pg as sessions

from chalicelib.core.sessions import sessions_mobs


@@ -175,7 +175,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
                                      value_key=e_k
                                      ) if not specific_condition else specific_condition)

-    full_args = {"eventTypes": tuple(event_types), **full_args, **values}
+    full_args = {"eventTypes": event_types, **full_args, **values}
    n_stages = len(n_stages_query)
    if n_stages == 0:
        return []


@@ -243,7 +243,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
    logger.debug(query)
    logger.debug("---------------------------------------------------")
    try:
-        row = cur.execute(query)
+        row = cur.execute(query=query)
    except Exception as err:
        logger.warning("--------- SIMPLE FUNNEL SEARCH QUERY EXCEPTION CH-----------")
        logger.warning(query)


api/chalicelib/core/metrics/product_analytics/__init__.py (new file, +10 lines)
@@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental product-analytics")
    from .product_analytics_ch import *
else:
    from .product_analytics import *


@@ -89,6 +89,9 @@ def __transform_journey(rows, reverse_path=False):
link["target"] = sr_idx
|
||||
links.append(link)
|
||||
|
||||
if reverse_path:
|
||||
for n in nodes_values:
|
||||
n["depth"] = max_depth - n["depth"]
|
||||
return {"nodes": nodes_values,
|
||||
"links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
|
||||
|
||||
|

@@ -1,42 +1,99 @@
from typing import List

import schemas
from chalicelib.core.metrics.metrics_ch import __get_basic_constraints, __get_meta_constraint
from chalicelib.core.metrics.metrics_ch import __get_constraint_values, __complete_missing_steps
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, dev
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils import sql_helper as sh
from chalicelib.core import metadata
import logging
from time import time

import logging
from chalicelib.core.metrics.product_analytics import __transform_journey
import schemas
from chalicelib.core import metadata
from .product_analytics import __transform_journey
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size

logger = logging.getLogger(__name__)

JOURNEY_TYPES = {
    schemas.ProductAnalyticsSelectedEventType.LOCATION: {"eventType": "LOCATION", "column": "url_path"},
    schemas.ProductAnalyticsSelectedEventType.CLICK: {"eventType": "CLICK", "column": "label"},
    schemas.ProductAnalyticsSelectedEventType.INPUT: {"eventType": "INPUT", "column": "label"},
    schemas.ProductAnalyticsSelectedEventType.CUSTOM_EVENT: {"eventType": "CUSTOM", "column": "name"}
    schemas.ProductAnalyticsSelectedEventType.LOCATION: {"eventType": "LOCATION", "column": "`$properties`.url_path"},
    schemas.ProductAnalyticsSelectedEventType.CLICK: {"eventType": "CLICK", "column": "`$properties`.label"},
    schemas.ProductAnalyticsSelectedEventType.INPUT: {"eventType": "INPUT", "column": "`$properties`.label"},
    schemas.ProductAnalyticsSelectedEventType.CUSTOM_EVENT: {"eventType": "CUSTOM", "column": "`$properties`.name"}
}

def __get_basic_constraints_events(table_name=None, identifier="project_id"):
    if table_name:
        table_name += "."
    else:
        table_name = ""
    ch_sub_query = [f"{table_name}{identifier} =toUInt16(%({identifier})s)"]
    ch_sub_query.append(f"{table_name}created_at >= toDateTime(%(startTimestamp)s/1000)")
    ch_sub_query.append(f"{table_name}created_at < toDateTime(%(endTimestamp)s/1000)")
    return ch_sub_query
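# Illustrative sketch (not part of the original change): with the default identifier, the
# helper above yields the three baseline predicates shared by the product-analytics queries:
#
#   __get_basic_constraints_events(table_name="events")
#   # -> ["events.project_id =toUInt16(%(project_id)s)",
#   #     "events.created_at >= toDateTime(%(startTimestamp)s/1000)",
#   #     "events.created_at < toDateTime(%(endTimestamp)s/1000)"]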


def __frange(start, stop, step):
    result = []
    i = start
    while i < stop:
        result.append(i)
        i += step
    return result
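# Illustrative sketch (not part of the original change): unlike range(), __frange accepts
# fractional steps, which is what lets the chart buckets line up with a non-integer step size:
#
#   __frange(0, 1, 0.25)  # -> [0, 0.25, 0.5, 0.75]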


def __add_missing_keys(original, complete):
    for missing in [key for key in complete.keys() if key not in original.keys()]:
        original[missing] = complete[missing]
    return original


def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_key="timestamp", time_coefficient=1000):
    if len(rows) == density:
        return rows
    step = get_step_size(start_time, end_time, density, decimal=True)
    optimal = [(int(i * time_coefficient), int((i + step) * time_coefficient)) for i in
               __frange(start_time // time_coefficient, end_time // time_coefficient, step)]
    result = []
    r = 0
    o = 0
    for i in range(density):
        neutral_clone = dict(neutral)
        for k in neutral_clone.keys():
            if callable(neutral_clone[k]):
                neutral_clone[k] = neutral_clone[k]()
        if r < len(rows) and len(result) + len(rows) - r == density:
            result += rows[r:]
            break
        if r < len(rows) and o < len(optimal) and rows[r][time_key] < optimal[o][0]:
            # complete missing keys in original object
            rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
            result.append(rows[r])
            r += 1
        elif r < len(rows) and o < len(optimal) and optimal[o][0] <= rows[r][time_key] < optimal[o][1]:
            # complete missing keys in original object
            rows[r] = __add_missing_keys(original=rows[r], complete=neutral_clone)
            result.append(rows[r])
            r += 1
            o += 1
        else:
            neutral_clone[time_key] = optimal[o][0]
            result.append(neutral_clone)
            o += 1
    return result


# startPoints are computed before ranked_events to reduce the number of window functions over rows
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
# if start-point is selected, the selected event is ranked n°1
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
    # # This code is used for testing only
    # return __get_test_data()

    # ------ end of testing code ---
    if not data.hide_excess:
        data.hide_excess = True
        data.rows = 50
    sub_events = []
    start_points_conditions = []
    step_0_conditions = []
    step_1_post_conditions = ["event_number_in_session <= %(density)s"]

    q2_extra_col = None
    q2_extra_condition = None
    if len(data.metric_value) == 0:
        data.metric_value.append(schemas.ProductAnalyticsSelectedEventType.LOCATION)
        sub_events.append({"column": JOURNEY_TYPES[schemas.ProductAnalyticsSelectedEventType.LOCATION]["column"],

@@ -49,9 +106,18 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
sub_events.append({"column": JOURNEY_TYPES[s.type]["column"],
|
||||
"eventType": JOURNEY_TYPES[s.type]["eventType"]})
|
||||
step_1_post_conditions.append(
|
||||
f"(event_type='{JOURNEY_TYPES[s.type]["eventType"]}' AND event_number_in_session = 1 \
|
||||
OR event_type!='{JOURNEY_TYPES[s.type]["eventType"]}' AND event_number_in_session > 1)")
|
||||
f"(`$event_name`='{JOURNEY_TYPES[s.type]['eventType']}' AND event_number_in_session = 1 \
|
||||
OR `$event_name`!='{JOURNEY_TYPES[s.type]['eventType']}' AND event_number_in_session > 1)")
|
||||
extra_metric_values.append(s.type)
|
||||
if not q2_extra_col:
|
||||
# This is used in case start event has different type of the visible event,
|
||||
# because it causes intermediary events to be removed, so you find a jump from step-0 to step-3
|
||||
# because step-2 is not of a visible event
|
||||
q2_extra_col = """,leadInFrame(toNullable(event_number_in_session))
|
||||
OVER (PARTITION BY session_id ORDER BY created_at %s
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_event_number_in_session"""
|
||||
q2_extra_condition = """WHERE event_number_in_session + 1 = next_event_number_in_session
|
||||
OR isNull(next_event_number_in_session);"""
|
||||
data.metric_value += extra_metric_values
|
||||
|
||||
for v in data.metric_value:
|
||||
|
|
@ -63,7 +129,7 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
main_column = sub_events[0]['column']
|
||||
else:
|
||||
main_column = f"multiIf(%s,%s)" % (
|
||||
','.join([f"event_type='{s['eventType']}',{s['column']}" for s in sub_events[:-1]]),
|
||||
','.join([f"`$event_name`='{s['eventType']}',{s['column']}" for s in sub_events[:-1]]),
|
||||
sub_events[-1]["column"])
|
||||
extra_values = {}
|
||||
reverse = data.start_type == "end"
|
||||
|
|
@ -76,19 +142,19 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
event_type = JOURNEY_TYPES[sf.type]['eventType']
|
||||
extra_values = {**extra_values, **sh.multi_values(sf.value, value_key=f_k),
|
||||
f"start_event_type_{i}": event_type}
|
||||
start_points_conditions.append(f"(event_type=%(start_event_type_{i})s AND " +
|
||||
start_points_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||
sh.multi_conditions(f'{event_column} {op} %({f_k})s', sf.value, is_not=is_not,
|
||||
value_key=f_k)
|
||||
+ ")")
|
||||
step_0_conditions.append(f"(event_type=%(start_event_type_{i})s AND " +
|
||||
step_0_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||
sh.multi_conditions(f'e_value {op} %({f_k})s', sf.value, is_not=is_not,
|
||||
value_key=f_k)
|
||||
+ ")")
|
||||
if len(start_points_conditions) > 0:
|
||||
start_points_conditions = ["(" + " OR ".join(start_points_conditions) + ")",
|
||||
"events.project_id = toUInt16(%(project_id)s)",
|
||||
"events.datetime >= toDateTime(%(startTimestamp)s / 1000)",
|
||||
"events.datetime < toDateTime(%(endTimestamp)s / 1000)"]
|
||||
"events.created_at >= toDateTime(%(startTimestamp)s / 1000)",
|
||||
"events.created_at < toDateTime(%(endTimestamp)s / 1000)"]
|
||||
step_0_conditions = ["(" + " OR ".join(step_0_conditions) + ")",
|
||||
"pre_ranked_events.event_number_in_session = 1"]
|
||||
|
||||
|
|
@ -277,10 +343,11 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
else:
|
||||
path_direction = ""
|
||||
|
||||
ch_sub_query = __get_basic_constraints(table_name="events")
|
||||
# ch_sub_query = __get_basic_constraints(table_name="events")
|
||||
ch_sub_query = __get_basic_constraints_events(table_name="events")
|
||||
selected_event_type_sub_query = []
|
||||
for s in data.metric_value:
|
||||
selected_event_type_sub_query.append(f"events.event_type = '{JOURNEY_TYPES[s]['eventType']}'")
|
||||
selected_event_type_sub_query.append(f"events.`$event_name` = '{JOURNEY_TYPES[s]['eventType']}'")
|
||||
if s in exclusions:
|
||||
selected_event_type_sub_query[-1] += " AND (" + " AND ".join(exclusions[s]) + ")"
|
||||
selected_event_type_sub_query = " OR ".join(selected_event_type_sub_query)
|
||||
|
|
@ -303,14 +370,14 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
|
||||
if len(start_points_conditions) == 0:
|
||||
step_0_subquery = """SELECT DISTINCT session_id
|
||||
FROM (SELECT event_type, e_value
|
||||
FROM (SELECT `$event_name`, e_value
|
||||
FROM pre_ranked_events
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_type, e_value
|
||||
GROUP BY `$event_name`, e_value
|
||||
ORDER BY count(1) DESC
|
||||
LIMIT 1) AS top_start_events
|
||||
INNER JOIN pre_ranked_events
|
||||
ON (top_start_events.event_type = pre_ranked_events.event_type AND
|
||||
ON (top_start_events.`$event_name` = pre_ranked_events.`$event_name` AND
|
||||
top_start_events.e_value = pre_ranked_events.e_value)
|
||||
WHERE pre_ranked_events.event_number_in_session = 1"""
|
||||
initial_event_cte = ""
|
||||
|
|
@ -319,11 +386,11 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
FROM pre_ranked_events
|
||||
WHERE {" AND ".join(step_0_conditions)}"""
|
||||
initial_event_cte = f"""\
|
||||
initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
|
||||
initial_event AS (SELECT events.session_id, MIN(created_at) AS start_event_timestamp
|
||||
FROM {main_events_table} {"INNER JOIN sub_sessions USING (session_id)" if len(sessions_conditions) > 0 else ""}
|
||||
WHERE {" AND ".join(start_points_conditions)}
|
||||
GROUP BY 1),"""
|
||||
ch_sub_query.append("events.datetime>=initial_event.start_event_timestamp")
|
||||
ch_sub_query.append(f"events.created_at{'<=' if reverse else '>='}initial_event.start_event_timestamp")
|
||||
main_events_table += " INNER JOIN initial_event ON (events.session_id = initial_event.session_id)"
|
||||
sessions_conditions = []
|
||||
|
||||
|
|
@ -334,21 +401,21 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
top_query = []
|
||||
top_with_next_query = []
|
||||
other_query = []
|
||||
for i in range(1, data.density + (0 if data.hide_excess else 1)):
|
||||
for i in range(1, data.density + (1 if data.hide_excess else 0)):
|
||||
steps_query.append(f"""n{i} AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
`$event_name`,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = {i}
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
GROUP BY event_number_in_session, `$event_name`, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC)""")
|
||||
if data.hide_excess:
|
||||
if not data.hide_excess:
|
||||
projection_query.append(f"""\
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
`$event_name`,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
|
|
@ -358,37 +425,37 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
|||
else:
|
||||
top_query.append(f"""\
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
`$event_name`,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
SUM(n{i}.sessions_count) AS sessions_count
|
||||
FROM n{i}
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
GROUP BY event_number_in_session, `$event_name`, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT %(visibleRows)s""")
|
||||
|
||||
if i < data.density:
|
||||
drop_query.append(f"""SELECT event_number_in_session,
|
||||
event_type,
|
||||
`$event_name`,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n{i}
|
||||
WHERE isNull(n{i}.next_type)""")
|
||||
if not data.hide_excess:
|
||||
if data.hide_excess:
|
||||
top_with_next_query.append(f"""\
|
||||
SELECT n{i}.*
|
||||
FROM n{i}
|
||||
INNER JOIN top_n
|
||||
ON (n{i}.event_number_in_session = top_n.event_number_in_session
|
||||
AND n{i}.event_type = top_n.event_type
|
||||
AND n{i}.`$event_name` = top_n.`$event_name`
|
||||
AND n{i}.e_value = top_n.e_value)""")
|
||||
|
||||
if i > 1 and not data.hide_excess:
|
||||
if i > 1 and data.hide_excess:
|
||||
other_query.append(f"""SELECT n{i}.*
|
||||
FROM n{i}
|
||||
WHERE (event_number_in_session, event_type, e_value) NOT IN
|
||||
(SELECT event_number_in_session, event_type, e_value
|
||||
WHERE (event_number_in_session, `$event_name`, e_value) NOT IN
|
||||
(SELECT event_number_in_session, `$event_name`, e_value
|
||||
FROM top_n
|
||||
WHERE top_n.event_number_in_session = {i})""")
|
||||
|
||||
|
|
@ -406,12 +473,12 @@ WITH {initial_sessions_cte}
|
|||
{initial_event_cte}
|
||||
pre_ranked_events AS (SELECT *
|
||||
FROM (SELECT session_id,
|
||||
event_type,
|
||||
datetime,
|
||||
{main_column} AS e_value,
|
||||
`$event_name`,
|
||||
created_at,
|
||||
toString({main_column}) AS e_value,
|
||||
row_number() OVER (PARTITION BY session_id
|
||||
ORDER BY datetime {path_direction},
|
||||
message_id {path_direction} ) AS event_number_in_session
|
||||
ORDER BY created_at {path_direction},
|
||||
event_id {path_direction} ) AS event_number_in_session
|
||||
FROM {main_events_table} {"INNER JOIN sub_sessions ON (sub_sessions.session_id = events.session_id)" if len(sessions_conditions) > 0 else ""}
|
||||
WHERE {" AND ".join(ch_sub_query)}
|
||||
) AS full_ranked_events
|
||||
|
|
@ -419,10 +486,11 @@ WITH {initial_sessions_cte}
|
|||
SELECT *
|
||||
FROM pre_ranked_events;"""
|
||||
logger.debug("---------Q1-----------")
|
||||
ch.execute(query=ch_query1, parameters=params)
|
||||
ch_query1 = ch.format(query=ch_query1, parameters=params)
|
||||
ch.execute(query=ch_query1)
|
||||
if time() - _now > 2:
|
||||
logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
|
||||
logger.warning(ch.format(query=ch_query1, parameters=params))
|
||||
logger.warning(str.encode(ch_query1))
|
||||
logger.warning("----------------------")
|
||||
_now = time()
|
||||
|
||||
|
|
@ -433,65 +501,68 @@ WITH pre_ranked_events AS (SELECT *
|
|||
start_points AS ({step_0_subquery}),
|
||||
ranked_events AS (SELECT pre_ranked_events.*,
|
||||
leadInFrame(e_value)
|
||||
OVER (PARTITION BY session_id ORDER BY datetime {path_direction}
|
||||
OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
|
||||
leadInFrame(toNullable(event_type))
|
||||
OVER (PARTITION BY session_id ORDER BY datetime {path_direction}
|
||||
leadInFrame(toNullable(`$event_name`))
|
||||
OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
|
||||
{q2_extra_col % path_direction if q2_extra_col else ""}
|
||||
FROM start_points INNER JOIN pre_ranked_events USING (session_id))
|
||||
SELECT *
|
||||
FROM ranked_events;"""
|
||||
FROM ranked_events
|
||||
{q2_extra_condition if q2_extra_condition else ""};"""
|
||||
logger.debug("---------Q2-----------")
|
||||
ch.execute(query=ch_query2, parameters=params)
|
||||
ch_query2 = ch.format(query=ch_query2, parameters=params)
|
||||
ch.execute(query=ch_query2)
|
||||
if time() - _now > 2:
|
||||
logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
|
||||
logger.warning(ch.format(query=ch_query2, parameters=params))
|
||||
logger.warning(str.encode(ch_query2))
|
||||
logger.warning("----------------------")
|
||||
_now = time()
|
||||
|
||||
sub_cte = ""
|
||||
if not data.hide_excess:
|
||||
if data.hide_excess:
|
||||
sub_cte = f""",
|
||||
top_n AS ({"\nUNION ALL\n".join(top_query)}),
|
||||
top_n_with_next AS ({"\nUNION ALL\n".join(top_with_next_query)}),
|
||||
others_n AS ({"\nUNION ALL\n".join(other_query)})"""
|
||||
top_n AS ({" UNION ALL ".join(top_query)}),
|
||||
top_n_with_next AS ({" UNION ALL ".join(top_with_next_query)}),
|
||||
others_n AS ({" UNION ALL ".join(other_query)})"""
|
||||
projection_query = """\
|
||||
-- Top to Top: valid
|
||||
SELECT top_n_with_next.*
|
||||
FROM top_n_with_next
|
||||
INNER JOIN top_n
|
||||
ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
|
||||
AND top_n_with_next.next_type = top_n.event_type
|
||||
AND top_n_with_next.next_type = top_n.`$event_name`
|
||||
AND top_n_with_next.next_value = top_n.e_value)
|
||||
UNION ALL
|
||||
-- Top to Others: valid
|
||||
SELECT top_n_with_next.event_number_in_session,
|
||||
top_n_with_next.event_type,
|
||||
top_n_with_next.`$event_name`,
|
||||
top_n_with_next.e_value,
|
||||
'OTHER' AS next_type,
|
||||
NULL AS next_value,
|
||||
SUM(top_n_with_next.sessions_count) AS sessions_count
|
||||
FROM top_n_with_next
|
||||
WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
|
||||
(SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
|
||||
GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value
|
||||
(SELECT others_n.event_number_in_session, others_n.`$event_name`, others_n.e_value FROM others_n)
|
||||
GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.`$event_name`, top_n_with_next.e_value
|
||||
UNION ALL
|
||||
-- Top go to Drop: valid
|
||||
SELECT drop_n.event_number_in_session,
|
||||
drop_n.event_type,
|
||||
drop_n.`$event_name`,
|
||||
drop_n.e_value,
|
||||
drop_n.next_type,
|
||||
drop_n.next_value,
|
||||
drop_n.sessions_count
|
||||
FROM drop_n
|
||||
INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
|
||||
AND drop_n.event_type = top_n.event_type
|
||||
AND drop_n.`$event_name` = top_n.`$event_name`
|
||||
AND drop_n.e_value = top_n.e_value)
|
||||
ORDER BY drop_n.event_number_in_session
|
||||
UNION ALL
|
||||
-- Others got to Drop: valid
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
'OTHER' AS `$event_name`,
|
||||
NULL AS e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
|
|
@ -503,7 +574,7 @@ FROM ranked_events;"""
|
|||
UNION ALL
|
||||
-- Others got to Top:valid
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
'OTHER' AS `$event_name`,
|
||||
NULL AS e_value,
|
||||
others_n.next_type,
|
||||
others_n.next_value,
|
||||
|
|
@ -511,49 +582,55 @@ FROM ranked_events;"""
|
|||
FROM others_n
|
||||
WHERE isNotNull(others_n.next_type)
|
||||
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
|
||||
(SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
|
||||
(SELECT top_n.event_number_in_session, top_n.`$event_name`, top_n.e_value FROM top_n)
|
||||
GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
|
||||
UNION ALL
|
||||
-- Others got to Others
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
'OTHER' AS `$event_name`,
|
||||
NULL AS e_value,
|
||||
'OTHER' AS next_type,
|
||||
NULL AS next_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
SUM(others_n.sessions_count) AS sessions_count
|
||||
FROM others_n
|
||||
WHERE isNotNull(others_n.next_type)
|
||||
AND others_n.event_number_in_session < %(density)s
|
||||
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
|
||||
(SELECT event_number_in_session, event_type, e_value FROM top_n)
|
||||
(SELECT event_number_in_session, `$event_name`, e_value FROM top_n)
|
||||
GROUP BY others_n.event_number_in_session"""
|
||||
else:
|
||||
projection_query.append("""\
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
`$event_name`,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM drop_n""")
|
||||
projection_query = "\nUNION ALL\n".join(projection_query)
|
||||
projection_query = " UNION ALL ".join(projection_query)
|
||||
|
||||
ch_query3 = f"""\
|
||||
WITH ranked_events AS (SELECT *
|
||||
FROM ranked_events_{time_key}),
|
||||
{",\n".join(steps_query)},
|
||||
drop_n AS ({"\nUNION ALL\n".join(drop_query)})
|
||||
{", ".join(steps_query)},
|
||||
drop_n AS ({" UNION ALL ".join(drop_query)})
|
||||
{sub_cte}
|
||||
SELECT *
|
||||
SELECT event_number_in_session,
|
||||
`$event_name` AS event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM (
|
||||
{projection_query}
|
||||
) AS chart_steps
|
||||
ORDER BY event_number_in_session, sessions_count DESC;"""
|
||||
logger.debug("---------Q3-----------")
|
||||
rows = ch.execute(query=ch_query3, parameters=params)
|
||||
ch_query3 = ch.format(query=ch_query3, parameters=params)
|
||||
rows = ch.execute(query=ch_query3)
|
||||
if time() - _now > 2:
|
||||
logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
|
||||
logger.warning(ch.format(query=ch_query3, parameters=params))
|
||||
logger.warning(str.encode(ch_query3))
|
||||
logger.warning("----------------------")
|
||||
|
||||
return __transform_journey(rows=rows, reverse_path=reverse)
|
||||
|
|
@ -1,389 +0,0 @@
|
|||
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.metrics.metrics_ch import __get_basic_constraints, __get_basic_constraints_events
|
||||
from chalicelib.core.metrics.metrics_ch import __get_constraint_values, __complete_missing_steps
|
||||
from chalicelib.utils import ch_client, exp_ch_helper
|
||||
from chalicelib.utils import helper, dev
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
from chalicelib.core import metadata
|
||||
from time import time
|
||||
|
||||
import logging
|
||||
from chalicelib.core.metrics.product_analytics import __transform_journey
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
JOURNEY_TYPES = {
|
||||
schemas.ProductAnalyticsSelectedEventType.LOCATION: {
|
||||
"eventType": "LOCATION",
|
||||
"column": "JSON_VALUE(CAST(`$properties` AS String), '$.url_path')",
|
||||
},
|
||||
schemas.ProductAnalyticsSelectedEventType.CLICK: {
|
||||
"eventType": "LOCATION",
|
||||
"column": "JSON_VALUE(CAST(`$properties` AS String), '$.label')",
|
||||
},
|
||||
schemas.ProductAnalyticsSelectedEventType.INPUT: {
|
||||
"eventType": "LOCATION",
|
||||
"column": "JSON_VALUE(CAST(`$properties` AS String), '$.label')",
|
||||
},
|
||||
schemas.ProductAnalyticsSelectedEventType.CUSTOM_EVENT: {
|
||||
"eventType": "LOCATION",
|
||||
"column": "JSON_VALUE(CAST(`$properties` AS String), '$.name')",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# Q6: use events as a sub_query to support filter of materialized columns when doing a join
|
||||
# query: Q5, the result is correct,
|
||||
# startPoints are computed before ranked_events to reduce the number of window functions over rows
|
||||
# replaced time_to_target by time_from_previous
|
||||
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
|
||||
# sort by top 5 according to sessions_count at the CTE level
|
||||
# final part project data without grouping
|
||||
# if start-point is selected, the selected event is ranked n°1
|
||||
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||
sub_events = []
|
||||
start_points_conditions = []
|
||||
step_0_conditions = []
|
||||
step_1_post_conditions = ["event_number_in_session <= %(density)s"]
|
||||
|
||||
if len(data.metric_value) == 0:
|
||||
data.metric_value.append(schemas.ProductAnalyticsSelectedEventType.LOCATION)
|
||||
sub_events.append({"column": JOURNEY_TYPES[schemas.ProductAnalyticsSelectedEventType.LOCATION]["column"],
|
||||
"eventType": schemas.ProductAnalyticsSelectedEventType.LOCATION.value})
|
||||
else:
|
||||
if len(data.start_point) > 0:
|
||||
extra_metric_values = []
|
||||
for s in data.start_point:
|
||||
if s.type not in data.metric_value:
|
||||
sub_events.append({"column": JOURNEY_TYPES[s.type]["column"],
|
||||
"eventType": JOURNEY_TYPES[s.type]["eventType"]})
|
||||
step_1_post_conditions.append(
|
||||
f"(`$event_name`!='{JOURNEY_TYPES[s.type]["eventType"]}' OR event_number_in_session = 1)")
|
||||
extra_metric_values.append(s.type)
|
||||
data.metric_value += extra_metric_values
|
||||
|
||||
for v in data.metric_value:
|
||||
if JOURNEY_TYPES.get(v):
|
||||
sub_events.append({"column": JOURNEY_TYPES[v]["column"],
|
||||
"eventType": JOURNEY_TYPES[v]["eventType"]})
|
||||
|
||||
if len(sub_events) == 1:
|
||||
main_column = sub_events[0]['column']
|
||||
else:
|
||||
main_column = f"multiIf(%s,%s)" % (
|
||||
','.join([f"`$event_name`='{s['eventType']}',{s['column']}" for s in sub_events[:-1]]),
|
||||
sub_events[-1]["column"])
|
||||
extra_values = {}
|
||||
reverse = data.start_type == "end"
|
||||
for i, sf in enumerate(data.start_point):
|
||||
f_k = f"start_point_{i}"
|
||||
op = sh.get_sql_operator(sf.operator)
|
||||
sf.value = helper.values_for_operator(value=sf.value, op=sf.operator)
|
||||
is_not = sh.is_negation_operator(sf.operator)
|
||||
event_column = JOURNEY_TYPES[sf.type]['column']
|
||||
event_type = JOURNEY_TYPES[sf.type]['eventType']
|
||||
extra_values = {**extra_values, **sh.multi_values(sf.value, value_key=f_k),
|
||||
f"start_event_type_{i}": event_type}
|
||||
start_points_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||
sh.multi_conditions(f'{event_column} {op} %({f_k})s', sf.value, is_not=is_not,
|
||||
value_key=f_k)
|
||||
+ ")")
|
||||
step_0_conditions.append(f"(`$event_name`=%(start_event_type_{i})s AND " +
|
||||
sh.multi_conditions(f'e_value {op} %({f_k})s', sf.value, is_not=is_not,
|
||||
value_key=f_k)
|
||||
+ ")")
|
||||
if len(start_points_conditions) > 0:
|
||||
start_points_conditions = ["(" + " OR ".join(start_points_conditions) + ")",
|
||||
"events.project_id = toUInt16(%(project_id)s)",
|
||||
"events.created_at >= toDateTime(%(startTimestamp)s / 1000)",
|
||||
"events.created_at < toDateTime(%(endTimestamp)s / 1000)"]
|
||||
step_0_conditions = ["(" + " OR ".join(step_0_conditions) + ")",
|
||||
"pre_ranked_events.event_number_in_session = 1"]
|
||||
|
||||
exclusions = {}
|
||||
for i, ef in enumerate(data.excludes):
|
||||
if len(ef.value) == 0:
|
||||
continue
|
||||
if ef.type in data.metric_value:
|
||||
f_k = f"exclude_{i}"
|
||||
ef.value = helper.values_for_operator(value=ef.value, op=ef.operator)
|
||||
op = sh.get_sql_operator(ef.operator)
|
||||
op = sh.reverse_sql_operator(op)
|
||||
extra_values = {**extra_values, **sh.multi_values(ef.value, value_key=f_k)}
|
||||
exclusions[ef.type] = [
|
||||
sh.multi_conditions(f'{JOURNEY_TYPES[ef.type]["column"]} {op} %({f_k})s', ef.value, is_not=True,
|
||||
value_key=f_k)]
|
||||
|
||||
sessions_conditions = []
|
||||
meta_keys = None
|
||||
for i, f in enumerate(data.series[0].filter.filters):
|
||||
op = sh.get_sql_operator(f.operator)
|
||||
is_any = sh.isAny_opreator(f.operator)
|
||||
is_not = sh.is_negation_operator(f.operator)
|
||||
is_undefined = sh.isUndefined_operator(f.operator)
|
||||
f_k = f"f_value_{i}"
|
||||
extra_values = {**extra_values, **sh.multi_values(f.value, value_key=f_k)}
|
||||
|
||||
if not is_any and len(f.value) == 0:
|
||||
continue
|
||||
|
||||
process_filter(f, is_any, is_not, is_undefined, op, f_k, sessions_conditions, extra_values, meta_keys,
|
||||
project_id)
|
||||
|
||||
if reverse:
|
||||
path_direction = "DESC"
|
||||
else:
|
||||
path_direction = ""

ch_sub_query = __get_basic_constraints_events(table_name="events")
selected_event_type_sub_query = []
for s in data.metric_value:
    selected_event_type_sub_query.append(f"events.`$event_name` = '{JOURNEY_TYPES[s]['eventType']}'")
    if s in exclusions:
        selected_event_type_sub_query[-1] += " AND (" + " AND ".join(exclusions[s]) + ")"
selected_event_type_sub_query = " OR ".join(selected_event_type_sub_query)
ch_sub_query.append(f"({selected_event_type_sub_query})")

main_events_table = exp_ch_helper.get_main_events_table(data.startTimestamp) + " AS events"
main_sessions_table = exp_ch_helper.get_main_sessions_table(data.startTimestamp) + " AS sessions"
if len(sessions_conditions) > 0:
    sessions_conditions.append(f"sessions.project_id = toUInt16(%(project_id)s)")
    sessions_conditions.append(f"sessions.datetime >= toDateTime(%(startTimestamp)s / 1000)")
    sessions_conditions.append(f"sessions.datetime < toDateTime(%(endTimestamp)s / 1000)")
    sessions_conditions.append("sessions.events_count>1")
    sessions_conditions.append("sessions.duration>0")

    initial_sessions_cte = f"""sub_sessions AS (SELECT DISTINCT session_id
                                                FROM {main_sessions_table}
                                                WHERE {" AND ".join(sessions_conditions)}),"""
else:
    initial_sessions_cte = ""

if len(start_points_conditions) == 0:
    step_0_subquery = """SELECT DISTINCT session_id
                         FROM (SELECT `$event_name`, e_value
                               FROM pre_ranked_events
                               WHERE event_number_in_session = 1
                               GROUP BY `$event_name`, e_value
                               ORDER BY count(1) DESC
                               LIMIT 1) AS top_start_events
                              INNER JOIN pre_ranked_events
                                         ON (top_start_events.`$event_name` = pre_ranked_events.`$event_name` AND
                                             top_start_events.e_value = pre_ranked_events.e_value)
                         WHERE pre_ranked_events.event_number_in_session = 1"""
    initial_event_cte = ""
else:
    step_0_subquery = f"""SELECT DISTINCT session_id
                          FROM pre_ranked_events
                          WHERE {" AND ".join(step_0_conditions)}"""
    initial_event_cte = f"""\
    initial_event AS (SELECT events.session_id, MIN(created_at) AS start_event_timestamp
                      FROM {main_events_table} {"INNER JOIN sub_sessions USING (session_id)" if len(sessions_conditions) > 0 else ""}
                      WHERE {" AND ".join(start_points_conditions)}
                      GROUP BY 1),"""
    ch_sub_query.append("events.created_at>=initial_event.start_event_timestamp")
    main_events_table += " INNER JOIN initial_event ON (events.session_id = initial_event.session_id)"
    sessions_conditions = []

steps_query = ["""n1 AS (SELECT event_number_in_session,
                                `$event_name` as event_type,
                                e_value,
                                next_type,
                                next_value,
                                COUNT(1) AS sessions_count
                         FROM ranked_events
                         WHERE event_number_in_session = 1
                           AND isNotNull(next_value)
                         GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
                         ORDER BY sessions_count DESC
                         LIMIT %(eventThresholdNumberInGroup)s)"""]
projection_query = ["""SELECT event_number_in_session,
                              event_type,
                              e_value,
                              next_type,
                              next_value,
                              sessions_count
                       FROM n1"""]
for i in range(2, data.density + 1):
    steps_query.append(f"""n{i} AS (SELECT *
                                    FROM (SELECT re.event_number_in_session AS event_number_in_session,
                                                 re.`$event_name` as $event_name,
                                                 re.e_value AS e_value,
                                                 re.next_type AS next_type,
                                                 re.next_value AS next_value,
                                                 COUNT(1) AS sessions_count
                                          FROM n{i - 1} INNER JOIN ranked_events AS re
                                                        ON (n{i - 1}.next_value = re.e_value AND n{i - 1}.next_type = re.`$event_name`)
                                          WHERE re.event_number_in_session = {i}
                                          GROUP BY re.event_number_in_session, re.`$event_name`, re.e_value, re.next_type, re.next_value) AS sub_level
                                    ORDER BY sessions_count DESC
                                    LIMIT %(eventThresholdNumberInGroup)s)""")
    projection_query.append(f"""SELECT event_number_in_session,
                                       `$event_name`,
                                       e_value,
                                       next_type,
                                       next_value,
                                       sessions_count
                                FROM n{i}""")

with ch_client.ClickHouseClient(database="product_analytics") as ch:
    time_key = TimeUTC.now()
    _now = time()
    params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
              "endTimestamp": data.endTimestamp, "density": data.density,
              # This is ignored because UI will take care of it
              # "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
              "eventThresholdNumberInGroup": 8,
              **extra_values}

    ch_query1 = f"""\
    CREATE TEMPORARY TABLE pre_ranked_events_{time_key} AS
    WITH {initial_sessions_cte}
         {initial_event_cte}
         pre_ranked_events AS (SELECT *
                               FROM (SELECT session_id,
                                            `$event_name`,
                                            created_at,
                                            {main_column} AS e_value,
                                            row_number() OVER (PARTITION BY session_id
                                                               ORDER BY created_at {path_direction},
                                                                        event_id {path_direction} ) AS event_number_in_session
                                     FROM {main_events_table} {"INNER JOIN sub_sessions ON (sub_sessions.session_id = events.session_id)" if len(sessions_conditions) > 0 else ""}
                                     WHERE {" AND ".join(ch_sub_query)}
                                     ) AS full_ranked_events
                               WHERE {" AND ".join(step_1_post_conditions)})
    SELECT *
    FROM pre_ranked_events;"""
    logger.debug("---------Q1-----------")
    query = ch.format(query=ch_query1, parameters=params)
    ch.execute(query=query)
    if time() - _now > 2:
        logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
        logger.warning(query)
        logger.warning("----------------------")
    _now = time()

    ch_query2 = f"""\
    CREATE TEMPORARY TABLE ranked_events_{time_key} AS
    WITH pre_ranked_events AS (SELECT *
                               FROM pre_ranked_events_{time_key}),
         start_points AS ({step_0_subquery}),
         ranked_events AS (SELECT pre_ranked_events.*,
                                  leadInFrame(e_value)
                                      OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
                                            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
                                  leadInFrame(toNullable(`$event_name`))
                                      OVER (PARTITION BY session_id ORDER BY created_at {path_direction}
                                            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
                           FROM start_points INNER JOIN pre_ranked_events USING (session_id))
    SELECT *
    FROM ranked_events;"""
    logger.debug("---------Q2-----------")
    query = ch.format(query=ch_query2, parameters=params)
    ch.execute(query=query)
    if time() - _now > 2:
        logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
        logger.warning(query)
        logger.warning("----------------------")
    _now = time()

    ch_query3 = f"""\
    WITH ranked_events AS (SELECT *
                           FROM ranked_events_{time_key}),
         {",".join(steps_query)}
    SELECT *
    FROM ({" UNION ALL ".join(projection_query)}) AS chart_steps
    ORDER BY event_number_in_session;"""
    logger.debug("---------Q3-----------")
    query = ch.format(query=ch_query3, parameters=params)
    rows = ch.execute(query=query)
    if time() - _now > 2:
        logger.warning(f">>>>>>>>>PathAnalysis long query EE ({int(time() - _now)}s)<<<<<<<<<")
        logger.warning(query)
        logger.warning("----------------------")

return __transform_journey(rows=rows, reverse_path=reverse)
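For readers tracing the query builder above, the level-by-level CTE chaining can be reproduced in isolation. The sketch below is a simplified stand-in, not the production builder: column lists are trimmed, the join keeps only the next_value match (the real query also matches next_type), and a ranked_events relation is assumed to exist. It only illustrates how ch_query3 is assembled for a given density.

# Minimal sketch of the n1..nK CTE chaining used for path analysis.
# Assumes a `ranked_events` relation with the columns referenced below;
# thresholds and column lists are trimmed for readability.

def build_path_query(density: int, threshold: int = 8) -> str:
    steps = [f"""n1 AS (SELECT event_number_in_session, e_value, next_type, next_value,
                               COUNT(1) AS sessions_count
                        FROM ranked_events
                        WHERE event_number_in_session = 1 AND isNotNull(next_value)
                        GROUP BY event_number_in_session, e_value, next_type, next_value
                        ORDER BY sessions_count DESC
                        LIMIT {threshold})"""]
    projections = ["SELECT * FROM n1"]
    for i in range(2, density + 1):
        # each level keeps only events that continue a kept edge of the previous level
        # (joined on next_value only here; the production query also matches next_type)
        steps.append(f"""n{i} AS (SELECT re.event_number_in_session, re.e_value, re.next_type, re.next_value,
                                         COUNT(1) AS sessions_count
                                  FROM n{i - 1} INNER JOIN ranked_events AS re
                                       ON (n{i - 1}.next_value = re.e_value)
                                  WHERE re.event_number_in_session = {i}
                                  GROUP BY re.event_number_in_session, re.e_value, re.next_type, re.next_value
                                  ORDER BY sessions_count DESC
                                  LIMIT {threshold})""")
        projections.append(f"SELECT * FROM n{i}")
    return ("WITH " + ",\n".join(steps)
            + "\nSELECT * FROM (" + " UNION ALL ".join(projections) + ") AS chart_steps"
            + "\nORDER BY event_number_in_session;")


if __name__ == "__main__":
    print(build_path_query(density=3))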
def process_filter(f, is_any, is_not, is_undefined, op, f_k, sessions_conditions, extra_values, meta_keys, project_id):
    # Mapping for common types to their column names
    type_column_mapping = {
        schemas.FilterType.USER_BROWSER: 'user_browser',
        schemas.FilterType.USER_OS: 'user_os',
        schemas.FilterType.USER_DEVICE: 'user_device',
        schemas.FilterType.USER_COUNTRY: 'user_country',
        schemas.FilterType.USER_CITY: 'user_city',
        schemas.FilterType.USER_STATE: 'user_state',
        schemas.FilterType.UTM_SOURCE: 'utm_source',
        schemas.FilterType.UTM_MEDIUM: 'utm_medium',
        schemas.FilterType.UTM_CAMPAIGN: 'utm_campaign',
        schemas.FilterType.USER_ID: 'user_id',
        schemas.FilterType.USER_ID_MOBILE: 'user_id',
        schemas.FilterType.USER_ANONYMOUS_ID: 'user_anonymous_id',
        schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: 'user_anonymous_id',
        schemas.FilterType.REV_ID: 'rev_id',
        schemas.FilterType.REV_ID_MOBILE: 'rev_id',
    }

    if f.type in type_column_mapping:
        column = type_column_mapping[f.type]
        if is_any:
            sessions_conditions.append(f'isNotNull({column})')
        elif is_undefined:
            sessions_conditions.append(f'isNull({column})')
        else:
            sessions_conditions.append(
                sh.multi_conditions(f"{column} {op} toString(%({f_k})s)", f.value, is_not=is_not, value_key=f_k)
            )

    elif f.type == schemas.FilterType.DURATION:
        if len(f.value) > 0 and f.value[0] is not None:
            sessions_conditions.append("duration >= %(minDuration)s")
            extra_values["minDuration"] = f.value[0]
        if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
            sessions_conditions.append("duration <= %(maxDuration)s")
            extra_values["maxDuration"] = f.value[1]

    elif f.type == schemas.FilterType.REFERRER:
        if is_any:
            sessions_conditions.append('isNotNull(base_referrer)')
        else:
            sessions_conditions.append(
                sh.multi_conditions(f"base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)
            )

    elif f.type == schemas.FilterType.METADATA:
        if meta_keys is None:
            meta_keys = metadata.get(project_id=project_id)
            meta_keys = {m["key"]: m["index"] for m in meta_keys}
        if f.source in meta_keys.keys():
            column = metadata.index_to_colname(meta_keys[f.source])
            if is_any:
                sessions_conditions.append(f"isNotNull({column})")
            elif is_undefined:
                sessions_conditions.append(f"isNull({column})")
            else:
                sessions_conditions.append(
                    sh.multi_conditions(f"{column} {op} toString(%({f_k})s)", f.value, is_not=is_not, value_key=f_k)
                )

    elif f.type == schemas.FilterType.PLATFORM:
        sessions_conditions.append(
            sh.multi_conditions(f"user_device_type {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)
        )

    elif f.type == schemas.FilterType.ISSUE:
        if is_any:
            sessions_conditions.append("array_length(issue_types, 1) > 0")
        else:
            sessions_conditions.append(
                sh.multi_conditions(f"has(issue_types,%({f_k})s)", f.value, is_not=is_not, value_key=f_k)
            )

    elif f.type == schemas.FilterType.EVENTS_COUNT:
        sessions_conditions.append(
            sh.multi_conditions(f"events_count {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)
        )
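process_filter above is essentially a dispatch table: one dict maps plain filter types to session columns, and only the special cases (duration, metadata, platform, issues, events count) get dedicated branches. The following self-contained sketch illustrates that pattern; FilterType, Filter and multi_conditions here are simplified stand-ins for demonstration, not the project's schemas or sql_helper API.

# Self-contained sketch of the dispatch-table filtering pattern used by process_filter.
# FilterType, Filter and multi_conditions below are simplified stand-ins, for illustration only.
from dataclasses import dataclass, field
from enum import Enum


class FilterType(str, Enum):
    USER_BROWSER = "userBrowser"
    USER_COUNTRY = "userCountry"
    DURATION = "duration"


@dataclass
class Filter:
    type: FilterType
    value: list = field(default_factory=list)


TYPE_COLUMN_MAPPING = {
    FilterType.USER_BROWSER: "user_browser",
    FilterType.USER_COUNTRY: "user_country",
}


def multi_conditions(template: str, values: list, value_key: str) -> str:
    # expands one templated condition per value and ORs them together
    return "(" + " OR ".join(template % {value_key: f"%({value_key}_{i})s"} for i, _ in enumerate(values)) + ")"


def build_conditions(f: Filter, f_k: str, conditions: list, extra_values: dict) -> None:
    if f.type in TYPE_COLUMN_MAPPING:
        # generic case: filter type maps straight to a session column
        column = TYPE_COLUMN_MAPPING[f.type]
        conditions.append(multi_conditions(f"{column} = %({f_k})s", f.value, value_key=f_k))
    elif f.type == FilterType.DURATION:
        # special case: duration becomes a bounded numeric condition
        if f.value and f.value[0] is not None:
            conditions.append("duration >= %(minDuration)s")
            extra_values["minDuration"] = f.value[0]


conds, extras = [], {}
build_conditions(Filter(FilterType.USER_BROWSER, ["Chrome", "Firefox"]), "f_value0", conds, extras)
build_conditions(Filter(FilterType.DURATION, [5000, None]), "f_value1", conds, extras)
print(conds, extras)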
@@ -1,2 +1,6 @@
TENANT_CONDITION = "TRUE"
MOB_KEY=""
MOB_KEY = ""


def get_file_key(project_id, session_id):
    return {}
@@ -413,7 +413,6 @@ def update_project_conditions(project_id, conditions):
        create_project_conditions(project_id, to_be_created)

    if to_be_updated:
        logger.debug(to_be_updated)
        update_project_condition(project_id, to_be_updated)

    return get_conditions(project_id)
@@ -428,3 +427,45 @@ def get_projects_ids(tenant_id):
        cur.execute(query=query)
        rows = cur.fetchall()
    return [r["project_id"] for r in rows]


def delete_metadata_condition(project_id, metadata_key):
    sql = """\
    UPDATE public.projects_conditions
    SET filters=(SELECT COALESCE(jsonb_agg(elem), '[]'::jsonb)
                 FROM jsonb_array_elements(filters) AS elem
                 WHERE NOT (elem ->> 'type' = 'metadata'
                     AND elem ->> 'source' = %(metadata_key)s))
    WHERE project_id = %(project_id)s
      AND jsonb_typeof(filters) = 'array'
      AND EXISTS (SELECT 1
                  FROM jsonb_array_elements(filters) AS elem
                  WHERE elem ->> 'type' = 'metadata'
                    AND elem ->> 'source' = %(metadata_key)s);"""

    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(sql, {"project_id": project_id, "metadata_key": metadata_key})
        cur.execute(query)


def rename_metadata_condition(project_id, old_metadata_key, new_metadata_key):
    sql = """\
    UPDATE public.projects_conditions
    SET filters = (SELECT jsonb_agg(CASE
                                        WHEN elem ->> 'type' = 'metadata' AND elem ->> 'source' = %(old_metadata_key)s
                                            THEN elem || ('{"source": "'||%(new_metadata_key)s||'"}')::jsonb
                                        ELSE elem END)
                   FROM jsonb_array_elements(filters) AS elem)
    WHERE project_id = %(project_id)s
      AND jsonb_typeof(filters) = 'array'
      AND EXISTS (SELECT 1
                  FROM jsonb_array_elements(filters) AS elem
                  WHERE elem ->> 'type' = 'metadata'
                    AND elem ->> 'source' = %(old_metadata_key)s);"""

    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(sql, {"project_id": project_id, "old_metadata_key": old_metadata_key,
                                  "new_metadata_key": new_metadata_key})
        cur.execute(query)

# TODO: make project conditions use metadata-column-name instead of metadata-key
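To make the intent of the two JSONB updates above concrete, here is a small pure-Python sketch of the same transformations applied to an in-memory filters array. The element shape ({"type": ..., "source": ...}) is assumed from the keys the SQL inspects; the real work happens inside PostgreSQL, not in Python.

# Pure-Python illustration of what delete_metadata_condition / rename_metadata_condition
# do to a project's `filters` JSONB array. The element shape is assumed from the keys
# the SQL reads; this is not the production code path.

def delete_metadata_filter(filters: list[dict], metadata_key: str) -> list[dict]:
    # drop every metadata filter whose source matches the removed metadata key
    return [f for f in filters
            if not (f.get("type") == "metadata" and f.get("source") == metadata_key)]


def rename_metadata_filter(filters: list[dict], old_key: str, new_key: str) -> list[dict]:
    # rewrite the source of matching metadata filters, leave everything else untouched
    return [{**f, "source": new_key}
            if f.get("type") == "metadata" and f.get("source") == old_key else f
            for f in filters]


filters = [{"type": "metadata", "source": "plan"},
           {"type": "userCountry", "value": ["FR"]}]
print(delete_metadata_filter(filters, "plan"))          # only the userCountry filter remains
print(rename_metadata_filter(filters, "plan", "tier"))  # metadata filter now points at "tier"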
@@ -14,7 +14,7 @@ def reset(data: schemas.ForgetPasswordPayloadSchema, background_tasks: Backgroun
    if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
        return {"errors": ["Invalid captcha."]}
    if not smtp.has_smtp():
        return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]}
        return {"errors": ["Email delivery failed due to invalid SMTP configuration. Please contact your admin."]}
    a_user = users.get_by_email_only(data.email)
    if a_user:
        invitation_link = users.generate_new_invitation(user_id=a_user["userId"])
@@ -3,9 +3,11 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
from . import sessions as sessions_legacy
from . import sessions_pg
from . import sessions_pg as sessions_legacy
from . import sessions_ch

if config("EXP_METRICS", cast=bool, default=False):
    from . import sessions_ch as sessions
else:
    from . import sessions
    from . import sessions_pg as sessions
@@ -1,10 +1,12 @@
from decouple import config
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils import pg_client
from chalicelib.core.issue_tracking import integrations_manager, base_issue
import json

from decouple import config

from chalicelib.core.issue_tracking import integrations_manager, base_issue
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC


def __get_saved_data(project_id, session_id, issue_id, tool):
    with pg_client.PostgresClient() as cur:

@@ -39,8 +41,8 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne
        issue = integration.issue_handler.create_new_assignment(title=title, assignee=assignee, description=description,
                                                                issue_type=issue_type,
                                                                integration_project_id=integration_project_id)
    except integration_base_issue.RequestException as e:
        return integration_base_issue.proxy_issues_handler(e)
    except base_issue.RequestException as e:
        return base_issue.proxy_issues_handler(e)
    if issue is None or "id" not in issue:
        return {"errors": ["something went wrong while creating the issue"]}
    with pg_client.PostgresClient() as cur:
@@ -1,11 +1,9 @@
import ast
import logging
from typing import List, Union

import schemas
from chalicelib.core import events, metadata, projects
from chalicelib.core.metrics import metrics
from chalicelib.core.sessions import sessions_favorite, performance_event, sessions_legacy
from chalicelib.core import events, metadata
from . import performance_event, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
from chalicelib.utils import sql_helper as sh

@@ -15,8 +13,8 @@ logger = logging.getLogger(__name__)
def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
                   metric_type: schemas.MetricType, metric_of: schemas.MetricOfTimeseries | schemas.MetricOfTable,
                   metric_value: List):
    step_size = int(metrics_helper.get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
                                                 density=density))
    step_size = metrics_helper.get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
                                             density=density, factor=1)
    extra_event = None
    if metric_of == schemas.MetricOfTable.VISITED_URL:
        extra_event = f"""SELECT DISTINCT ev.session_id, ev.url_path
@@ -36,25 +34,27 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
    with ch_client.ClickHouseClient() as cur:
        if metric_type == schemas.MetricType.TIMESERIES:
            if metric_of == schemas.MetricOfTimeseries.SESSION_COUNT:
                query = f"""SELECT toUnixTimestamp(
                                   toStartOfInterval(processed_sessions.datetime, INTERVAL %(step_size)s second)
                               ) * 1000 AS timestamp,
                               COUNT(processed_sessions.session_id) AS count
                            FROM (SELECT s.session_id AS session_id,
                                         s.datetime AS datetime
                                  {query_part}) AS processed_sessions
                query = f"""SELECT gs.generate_series AS timestamp,
                                   COALESCE(COUNT(DISTINCT processed_sessions.session_id),0) AS count
                            FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
                                 LEFT JOIN (SELECT s.session_id AS session_id,
                                                   s.datetime AS datetime
                                            {query_part}) AS processed_sessions ON(TRUE)
                            WHERE processed_sessions.datetime >= toDateTime(timestamp / 1000)
                              AND processed_sessions.datetime < toDateTime((timestamp + %(step_size)s) / 1000)
                            GROUP BY timestamp
                            ORDER BY timestamp;"""
            elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
                query = f"""SELECT toUnixTimestamp(
                                   toStartOfInterval(processed_sessions.datetime, INTERVAL %(step_size)s second)
                               ) * 1000 AS timestamp,
                               COUNT(DISTINCT processed_sessions.user_id) AS count
                            FROM (SELECT s.user_id AS user_id,
                                         s.datetime AS datetime
                                  {query_part}
                                  WHERE isNotNull(s.user_id)
                                    AND s.user_id != '') AS processed_sessions
                query = f"""SELECT gs.generate_series AS timestamp,
                                   COALESCE(COUNT(DISTINCT processed_sessions.user_id),0) AS count
                            FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
                                 LEFT JOIN (SELECT multiIf(s.user_id IS NOT NULL AND s.user_id != '', s.user_id,
                                                           s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != '',
                                                           s.user_anonymous_id, toString(s.user_uuid)) AS user_id,
                                                   s.datetime AS datetime
                                            {query_part}) AS processed_sessions ON(TRUE)
                            WHERE processed_sessions.datetime >= toDateTime(timestamp / 1000)
                              AND processed_sessions.datetime < toDateTime((timestamp + %(step_size)s) / 1000)
                            GROUP BY timestamp
                            ORDER BY timestamp;"""
            else:
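The switch from toStartOfInterval grouping to a generate_series LEFT JOIN means every step of the requested range yields a row, even when no session falls into it. The sketch below reproduces that zero-filled bucketing in plain Python (timestamps in milliseconds, matching the query parameters); it only illustrates the result shape, not the ClickHouse execution path.

# Plain-Python illustration of the zero-filled bucketing that the
# generate_series + LEFT JOIN query shape produces (timestamps in ms).

def bucketize(session_ts: list[int], start: int, end: int, step: int) -> list[dict]:
    buckets = []
    for ts in range(start, end, step):
        # a session is counted in the bucket [ts, ts + step)
        count = sum(1 for s in session_ts if ts <= s < ts + step)
        buckets.append({"timestamp": ts, "count": count})  # empty buckets stay, with count=0
    return buckets


sessions = [1_000, 1_500, 9_000]
print(bucketize(sessions, start=0, end=10_000, step=2_500))
# -> [{'timestamp': 0, 'count': 2}, {'timestamp': 2500, 'count': 0},
#     {'timestamp': 5000, 'count': 0}, {'timestamp': 7500, 'count': 1}]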
@ -64,9 +64,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
|
|||
logging.debug("--------------------")
|
||||
logging.debug(main_query)
|
||||
logging.debug("--------------------")
|
||||
sessions = cur.execute(main_query)
|
||||
sessions = metrics.__complete_missing_steps(start_time=data.startTimestamp, end_time=data.endTimestamp,
|
||||
density=density, neutral={"count": 0}, rows=sessions)
|
||||
sessions = cur.execute(query=main_query)
|
||||
|
||||
elif metric_type == schemas.MetricType.TABLE:
|
||||
full_args["limit_s"] = 0
|
||||
|
|
@ -114,7 +112,7 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
|
|||
logging.debug("--------------------")
|
||||
logging.debug(main_query)
|
||||
logging.debug("--------------------")
|
||||
sessions = cur.execute(main_query)
|
||||
sessions = cur.execute(query=main_query)
|
||||
# cur.fetchone()
|
||||
count = 0
|
||||
if len(sessions) > 0:
|
||||
|
|
@ -123,7 +121,10 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
|
|||
s.pop("main_count")
|
||||
sessions = {"count": count, "values": helper.list_to_camel_case(sessions)}
|
||||
|
||||
return sessions
|
||||
return metrics_helper.complete_missing_steps(rows=sessions,
|
||||
start_timestamp=data.startTimestamp,
|
||||
end_timestamp=data.endTimestamp, step=step_size,
|
||||
neutral={"count": 0})
|
||||
|
||||
|
||||
def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
|
||||
|
|
@ -152,7 +153,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -177,7 +178,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -243,7 +244,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
{extra_where}
|
||||
GROUP BY {main_col}
|
||||
ORDER BY total DESC
|
||||
LIMIT %(limit_e)s OFFSET %(limit_s)s;"""
|
||||
LIMIT %(limit)s OFFSET %(limit_s)s;"""
|
||||
else:
|
||||
main_query = f"""SELECT COUNT(DISTINCT {main_col}) OVER () AS main_count,
|
||||
{main_col} AS name,
|
||||
|
|
@ -256,13 +257,13 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
{extra_where}
|
||||
GROUP BY {main_col}
|
||||
ORDER BY total DESC
|
||||
LIMIT %(limit_e)s OFFSET %(limit_s)s;"""
|
||||
LIMIT %(limit)s OFFSET %(limit_s)s;"""
|
||||
|
||||
main_query = cur.format(query=main_query, parameters=full_args)
|
||||
logging.debug("--------------------")
|
||||
logging.debug(main_query)
|
||||
logging.debug("--------------------")
|
||||
sessions = cur.execute(main_query)
|
||||
sessions = cur.execute(query=main_query)
|
||||
count = 0
|
||||
total = 0
|
||||
if len(sessions) > 0:
|
||||
|
|
@ -380,7 +381,7 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
filter_type = f.type
|
||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||
f_k = f"f_value{i}"
|
||||
full_args = {**full_args, f_k: f.value, **sh.multi_values(f.value, value_key=f_k)}
|
||||
full_args = {**full_args, f_k: sh.single_value(f.value), **sh.multi_values(f.value, value_key=f_k)}
|
||||
op = sh.get_sql_operator(f.operator) \
|
||||
if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator.value
|
||||
is_any = sh.isAny_opreator(f.operator)
|
||||
|
|
@ -869,12 +870,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
events_conditions[-1]["condition"] = []
|
||||
if not is_any and event.value not in [None, "*", ""]:
|
||||
event_where.append(
|
||||
sh.multi_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
|
||||
sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
|
||||
event.value, value_key=e_k))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
events_extra_join += f" AND {event_where[-1]}"
|
||||
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
|
||||
event_where.append(sh.multi_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))
|
||||
event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
events_extra_join += f" AND {event_where[-1]}"
|
||||
|
||||
|
|
@ -1107,8 +1108,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
is_any = sh.isAny_opreator(f.operator)
|
||||
if is_any or len(f.value) == 0:
|
||||
continue
|
||||
is_negative_operator = sh.is_negation_operator(f.operator)
|
||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||
op = sh.get_sql_operator(f.operator)
|
||||
r_op = ""
|
||||
if is_negative_operator:
|
||||
r_op = sh.reverse_sql_operator(op)
|
||||
e_k_f = e_k + f"_fetch{j}"
|
||||
full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
|
||||
if f.type == schemas.FetchFilterType.FETCH_URL:
|
||||
|
|
@ -1117,9 +1122,15 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.url_path {r_op} %({e_k_f})s", f.value, value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE:
|
||||
event_where.append(json_condition(
|
||||
"main", "$properties", 'status', op, f.value, e_k_f
|
||||
"main", "$properties", 'status', op, f.value, e_k_f, True, True
|
||||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
|
|
@ -1129,6 +1140,13 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.method {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_DURATION:
|
||||
event_where.append(
|
||||
sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value,
|
||||
|
|
@ -1141,12 +1159,26 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.request_body {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY:
|
||||
event_where.append(json_condition(
|
||||
"main", "$properties", 'response_body', op, f.value, e_k_f
|
||||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.response_body {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
else:
|
||||
logging.warning(f"undefined FETCH filter: {f.type}")
|
||||
if not apply:
|
||||
|
|
@ -1394,17 +1426,30 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
if extra_conditions and len(extra_conditions) > 0:
|
||||
_extra_or_condition = []
|
||||
for i, c in enumerate(extra_conditions):
|
||||
if sh.isAny_opreator(c.operator):
|
||||
if sh.isAny_opreator(c.operator) and c.type != schemas.EventType.REQUEST_DETAILS.value:
|
||||
continue
|
||||
e_k = f"ec_value{i}"
|
||||
op = sh.get_sql_operator(c.operator)
|
||||
c.value = helper.values_for_operator(value=c.value, op=c.operator)
|
||||
full_args = {**full_args,
|
||||
**sh.multi_values(c.value, value_key=e_k)}
|
||||
if c.type == events.EventType.LOCATION.ui_type:
|
||||
if c.type in (schemas.EventType.LOCATION.value, schemas.EventType.REQUEST.value):
|
||||
_extra_or_condition.append(
|
||||
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
|
||||
c.value, value_key=e_k))
|
||||
elif c.type == schemas.EventType.REQUEST_DETAILS.value:
|
||||
for j, c_f in enumerate(c.filters):
|
||||
if sh.isAny_opreator(c_f.operator) or len(c_f.value) == 0:
|
||||
continue
|
||||
e_k += f"_{j}"
|
||||
op = sh.get_sql_operator(c_f.operator)
|
||||
c_f.value = helper.values_for_operator(value=c_f.value, op=c_f.operator)
|
||||
full_args = {**full_args,
|
||||
**sh.multi_values(c_f.value, value_key=e_k)}
|
||||
if c_f.type == schemas.FetchFilterType.FETCH_URL.value:
|
||||
_extra_or_condition.append(
|
||||
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
|
||||
c_f.value, value_key=e_k))
|
||||
else:
|
||||
logging.warning(f"unsupported extra_event type:${c.type}")
|
||||
if len(_extra_or_condition) > 0:
|
||||
|
|
@ -1415,9 +1460,10 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
query_part = f"""{f"({events_query_part}) AS f" if len(events_query_part) > 0 else ""}"""
|
||||
else:
|
||||
if len(events_query_part) > 0:
|
||||
extra_join += f"""INNER JOIN (SELECT *
|
||||
extra_join += f"""INNER JOIN (SELECT DISTINCT ON (session_id) *
|
||||
FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
|
||||
WHERE {" AND ".join(extra_constraints)}) AS s ON(s.session_id=f.session_id)"""
|
||||
WHERE {" AND ".join(extra_constraints)}
|
||||
ORDER BY _timestamp DESC) AS s ON(s.session_id=f.session_id)"""
|
||||
else:
|
||||
deduplication_keys = ["session_id"] + extra_deduplication
|
||||
extra_join = f"""(SELECT *
|
||||
|
|
@ -1504,7 +1550,7 @@ def session_exists(project_id, session_id):
|
|||
AND project_id=%(project_id)s
|
||||
LIMIT 1""",
|
||||
parameters={"project_id": project_id, "session_id": session_id})
|
||||
row = cur.execute(query)
|
||||
row = cur.execute(query=query)
|
||||
return row is not None
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
from .sessions_devtool import *
|
||||
|
|
@ -4,7 +4,7 @@ import schemas
|
|||
from chalicelib.utils.storage import StorageClient
|
||||
|
||||
|
||||
def __get_devtools_keys(project_id, session_id):
|
||||
def get_devtools_keys(project_id, session_id):
|
||||
params = {
|
||||
"sessionId": session_id,
|
||||
"projectId": project_id
|
||||
|
|
@ -16,7 +16,7 @@ def __get_devtools_keys(project_id, session_id):
|
|||
|
||||
def get_urls(session_id, project_id, context: schemas.CurrentContext, check_existence: bool = True):
|
||||
results = []
|
||||
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
|
||||
for k in get_devtools_keys(project_id=project_id, session_id=session_id):
|
||||
if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
|
||||
continue
|
||||
results.append(StorageClient.get_presigned_url_for_sharing(
|
||||
|
|
@ -29,5 +29,5 @@ def get_urls(session_id, project_id, context: schemas.CurrentContext, check_exis
|
|||
|
||||
def delete_mobs(project_id, session_ids):
|
||||
for session_id in session_ids:
|
||||
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
|
||||
for k in get_devtools_keys(project_id=project_id, session_id=session_id):
|
||||
StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
|
||||
|
|
@ -0,0 +1 @@
|
|||
from .sessions_favorite import *
|
||||
|
|
@ -1,76 +1,81 @@
|
|||
from functools import cache
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.utils.event_filter_definition import SupportedFilter
|
||||
|
||||
SUPPORTED_TYPES = {
|
||||
schemas.FilterType.USER_OS: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
|
||||
schemas.FilterType.USER_BROWSER: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
|
||||
schemas.FilterType.USER_DEVICE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
|
||||
schemas.FilterType.USER_COUNTRY: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
|
||||
schemas.FilterType.USER_CITY: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
|
||||
schemas.FilterType.USER_STATE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
|
||||
schemas.FilterType.USER_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
|
||||
schemas.FilterType.REV_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
|
||||
schemas.FilterType.REFERRER: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
|
||||
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
|
||||
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
|
||||
schemas.FilterType.UTM_SOURCE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
|
||||
# Mobile
|
||||
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
|
||||
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(
|
||||
typename=schemas.FilterType.USER_DEVICE_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
|
||||
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
|
||||
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
|
||||
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
|
||||
|
||||
}
|
||||
@cache
|
||||
def supported_types():
|
||||
return {
|
||||
schemas.FilterType.USER_OS: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
|
||||
schemas.FilterType.USER_BROWSER: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
|
||||
schemas.FilterType.USER_DEVICE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
|
||||
schemas.FilterType.USER_COUNTRY: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
|
||||
schemas.FilterType.USER_CITY: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
|
||||
schemas.FilterType.USER_STATE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
|
||||
schemas.FilterType.USER_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
|
||||
schemas.FilterType.REV_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
|
||||
schemas.FilterType.REFERRER: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
|
||||
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
|
||||
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
|
||||
schemas.FilterType.UTM_SOURCE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
|
||||
# Mobile
|
||||
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
|
||||
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(
|
||||
typename=schemas.FilterType.USER_DEVICE_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
|
||||
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
|
||||
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
|
||||
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
|
||||
|
||||
}
|
||||
|
||||
|
||||
def search(text: str, meta_type: schemas.FilterType, project_id: int):
|
||||
rows = []
|
||||
if meta_type not in list(SUPPORTED_TYPES.keys()):
|
||||
if meta_type not in list(supported_types().keys()):
|
||||
return {"errors": ["unsupported type"]}
|
||||
rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
|
||||
rows += supported_types()[meta_type].get(project_id=project_id, text=text)
|
||||
# for IOS events autocomplete
|
||||
# if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
|
||||
# rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ def get_all_notes_by_project_id(tenant_id, project_id, user_id, data: schemas.Se
|
|||
|
||||
# filter by ownership or shared status
|
||||
if data.shared_only:
|
||||
conditions.append("sessions_notes.is_public")
|
||||
conditions.append("sessions_notes.is_public IS TRUE")
|
||||
elif data.mine_only:
|
||||
conditions.append("sessions_notes.user_id = %(user_id)s")
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -2,8 +2,8 @@ import logging
|
|||
from typing import List, Union
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata, projects
|
||||
from chalicelib.core.sessions import sessions_favorite, performance_event
|
||||
from chalicelib.core import events, metadata
|
||||
from . import performance_event
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
||||
|
|
@ -36,24 +36,24 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
|
|||
SELECT generated_timestamp AS timestamp,
|
||||
COUNT(s) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT 1 AS s
|
||||
FROM full_sessions
|
||||
WHERE start_ts >= generated_timestamp
|
||||
AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT 1 AS s
|
||||
FROM full_sessions
|
||||
WHERE start_ts >= generated_timestamp
|
||||
AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;""", full_args)
|
||||
elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
|
||||
main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.user_id, s.start_ts
|
||||
{query_part}
|
||||
AND s.user_id IS NOT NULL
|
||||
AND s.user_id != '')
|
||||
{query_part}
|
||||
AND s.user_id IS NOT NULL
|
||||
AND s.user_id != '')
|
||||
SELECT generated_timestamp AS timestamp,
|
||||
COUNT(s) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||
LEFT JOIN LATERAL ( SELECT DISTINCT user_id AS s
|
||||
FROM full_sessions
|
||||
WHERE start_ts >= generated_timestamp
|
||||
AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT DISTINCT user_id AS s
|
||||
FROM full_sessions
|
||||
WHERE start_ts >= generated_timestamp
|
||||
AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
|
||||
GROUP BY generated_timestamp
|
||||
ORDER BY generated_timestamp;""", full_args)
|
||||
else:
|
||||
|
|
@ -148,7 +148,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -165,7 +165,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -989,7 +989,7 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
|
|||
sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
|
||||
c.value, value_key=e_k))
|
||||
else:
|
||||
logger.warning(f"unsupported extra_event type:${c.type}")
|
||||
logger.warning(f"unsupported extra_event type: {c.type}")
|
||||
if len(_extra_or_condition) > 0:
|
||||
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
|
||||
query_part = f"""\
|
||||
|
|
@ -1002,7 +1002,6 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
|
|||
return full_args, query_part
|
||||
|
||||
|
||||
|
||||
def get_user_sessions(project_id, user_id, start_date, end_date):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
constraints = ["s.project_id = %(projectId)s", "s.user_id = %(userId)s"]
|
||||
|
|
@ -1112,4 +1111,3 @@ def check_recording_status(project_id: int) -> dict:
|
|||
"recordingStatus": row["recording_status"],
|
||||
"sessionsCount": row["sessions_count"]
|
||||
}
|
||||
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
import schemas
|
||||
from chalicelib.core import events, metadata, events_mobile, \
|
||||
issues, assist, canvas, user_testing
|
||||
from chalicelib.core.sessions import sessions_mobs, sessions_devtool
|
||||
from chalicelib.utils import errors_helper
|
||||
from . import sessions_mobs, sessions_devtool
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core.modules import MOB_KEY
|
||||
from chalicelib.core.modules import MOB_KEY, get_file_key
|
||||
|
||||
|
||||
def __is_mobile_session(platform):
|
||||
|
|
@ -22,6 +22,7 @@ def __group_metadata(session, project_metadata):
|
|||
|
||||
def get_pre_replay(project_id, session_id):
|
||||
return {
|
||||
**get_file_key(project_id=project_id, session_id=session_id),
|
||||
'domURL': [sessions_mobs.get_first_url(project_id=project_id, session_id=session_id, check_existence=False)]}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,11 +1,9 @@
|
|||
import logging
|
||||
from typing import List, Union
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata, projects
|
||||
from chalicelib.core.sessions import sessions_favorite, performance_event, sessions_legacy
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
from chalicelib.core import metadata, projects
|
||||
from . import sessions_favorite, sessions_legacy
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -40,16 +38,22 @@ COALESCE((SELECT TRUE
|
|||
|
||||
|
||||
# This function executes the query and return result
|
||||
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
|
||||
error_status=schemas.ErrorStatus.ALL, count_only=False, issue=None, ids_only=False,
|
||||
platform="web"):
|
||||
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.ProjectContext,
|
||||
user_id, errors_only=False, error_status=schemas.ErrorStatus.ALL,
|
||||
count_only=False, issue=None, ids_only=False, platform="web"):
|
||||
if data.bookmarked:
|
||||
data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project_id, user_id)
|
||||
|
||||
data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id)
|
||||
if data.startTimestamp is None:
|
||||
logger.debug(f"No vault sessions found for project:{project.project_id}")
|
||||
return {
|
||||
'total': 0,
|
||||
'sessions': [],
|
||||
'src': 1
|
||||
}
|
||||
full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status,
|
||||
errors_only=errors_only,
|
||||
favorite_only=data.bookmarked, issue=issue,
|
||||
project_id=project_id,
|
||||
project_id=project.project_id,
|
||||
user_id=user_id, platform=platform)
|
||||
if data.limit is not None and data.page is not None:
|
||||
full_args["sessions_limit"] = data.limit
|
||||
|
|
@ -86,7 +90,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
else:
|
||||
sort = 'start_ts'
|
||||
|
||||
meta_keys = metadata.get(project_id=project_id)
|
||||
meta_keys = metadata.get(project_id=project.project_id)
|
||||
main_query = cur.mogrify(f"""SELECT COUNT(*) AS count,
|
||||
COALESCE(JSONB_AGG(users_sessions)
|
||||
FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
|
||||
|
|
@ -118,9 +122,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
sort = 'session_id'
|
||||
if data.sort is not None and data.sort != "session_id":
|
||||
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
|
||||
sort = helper.key_to_snake_case(data.sort)
|
||||
if data.sort == 'datetime':
|
||||
sort = 'start_ts'
|
||||
else:
|
||||
sort = helper.key_to_snake_case(data.sort)
|
||||
|
||||
meta_keys = metadata.get(project_id=project_id)
|
||||
meta_keys = metadata.get(project_id=project.project_id)
|
||||
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
|
||||
COALESCE(JSONB_AGG(full_sessions)
|
||||
FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
|
||||
|
|
@ -168,7 +175,8 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
# reverse=data.order.upper() == "DESC")
|
||||
return {
|
||||
'total': total,
|
||||
'sessions': helper.list_to_camel_case(sessions)
|
||||
'sessions': helper.list_to_camel_case(sessions),
|
||||
'src': 1
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
api/chalicelib/core/sessions/sessions_viewed/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .sessions_viewed import *
|
|
@ -1,6 +1,7 @@
|
|||
import logging
|
||||
|
||||
from chalicelib.core import sessions, assist
|
||||
from chalicelib.core import assist
|
||||
from . import sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ def refresh_spot_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row.get("spot_jwt_iat"), row.get("spot_jwt_refresh_jti"), row.get("spot_jwt_refresh_iat")
|
||||
return users.RefreshSpotJWTs(**row)
|
||||
|
||||
|
||||
def logout(user_id: int):
|
||||
|
|
@ -26,13 +26,13 @@ def logout(user_id: int):
|
|||
|
||||
|
||||
def refresh(user_id: int, tenant_id: int = -1) -> dict:
|
||||
spot_jwt_iat, spot_jwt_r_jti, spot_jwt_r_iat = refresh_spot_jwt_iat_jti(user_id=user_id)
|
||||
j = refresh_spot_jwt_iat_jti(user_id=user_id)
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_iat,
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_iat,
|
||||
aud=AUDIENCE, for_spot=True),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_r_iat,
|
||||
aud=AUDIENCE, jwt_jti=spot_jwt_r_jti, for_spot=True),
|
||||
"refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (spot_jwt_iat - spot_jwt_r_iat)
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_refresh_iat,
|
||||
aud=AUDIENCE, jwt_jti=j.spot_jwt_refresh_jti, for_spot=True),
|
||||
"refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (j.spot_jwt_iat - j.spot_jwt_refresh_iat)
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,13 +1,14 @@
|
|||
import json
|
||||
import secrets
|
||||
from typing import Optional
|
||||
|
||||
from decouple import config
|
||||
from fastapi import BackgroundTasks
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, model_validator
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import authorizers, metadata
|
||||
from chalicelib.core import tenants, spot, scope
|
||||
from chalicelib.core import authorizers
|
||||
from chalicelib.core import tenants, spot
|
||||
from chalicelib.utils import email_helper
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
|
|
@ -83,7 +84,6 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
|
|||
"name": name, "invitation_token": invitation_token})
|
||||
cur.execute(query)
|
||||
result = cur.fetchone()
|
||||
cur.execute(query)
|
||||
result["created_at"] = TimeUTC.datetime_to_timestamp(result["created_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
|
@ -284,7 +284,7 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
|
|||
if editor_id != user_id_to_update:
|
||||
admin = get_user_role(tenant_id=tenant_id, user_id=editor_id)
|
||||
if not admin["superAdmin"] and not admin["admin"]:
|
||||
return {"errors": ["unauthorized"]}
|
||||
return {"errors": ["unauthorized, you must have admin privileges"]}
|
||||
if admin["admin"] and user["superAdmin"]:
|
||||
return {"errors": ["only the owner can edit his own details"]}
|
||||
else:
|
||||
|
|
@@ -552,14 +552,35 @@ def refresh_auth_exists(user_id, jwt_jti=None):
    return r is not None


class ChangeJwt(BaseModel):
class FullLoginJWTs(BaseModel):
    jwt_iat: int
    jwt_refresh_jti: int
    jwt_refresh_jti: str
    jwt_refresh_iat: int
    spot_jwt_iat: int
    spot_jwt_refresh_jti: int
    spot_jwt_refresh_jti: str
    spot_jwt_refresh_iat: int

    @model_validator(mode="before")
    @classmethod
    def _transform_data(cls, values):
        if values.get("jwt_refresh_jti") is not None:
            values["jwt_refresh_jti"] = str(values["jwt_refresh_jti"])
        if values.get("spot_jwt_refresh_jti") is not None:
            values["spot_jwt_refresh_jti"] = str(values["spot_jwt_refresh_jti"])
        return values


class RefreshLoginJWTs(FullLoginJWTs):
    spot_jwt_iat: Optional[int] = None
    spot_jwt_refresh_jti: Optional[str] = None
    spot_jwt_refresh_iat: Optional[int] = None


class RefreshSpotJWTs(FullLoginJWTs):
    jwt_iat: Optional[int] = None
    jwt_refresh_jti: Optional[str] = None
    jwt_refresh_iat: Optional[int] = None


def change_jwt_iat_jti(user_id):
    with pg_client.PostgresClient() as cur:
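A quick, self-contained check of the new models' behaviour (the class definitions are copied from the new code above, trimmed to the relevant parts; the sample row values are made up): the before-mode validator turns integer jti columns coming from the database into the strings the JWT helpers expect, and the Optional overrides let each Refresh* variant accept rows where the other token family is absent.

# Minimal check of the jti coercion performed by FullLoginJWTs' model_validator.
# Classes trimmed from the code above; the numeric values are made-up sample data.
from typing import Optional
from pydantic import BaseModel, model_validator


class FullLoginJWTs(BaseModel):
    jwt_iat: int
    jwt_refresh_jti: str
    jwt_refresh_iat: int
    spot_jwt_iat: int
    spot_jwt_refresh_jti: str
    spot_jwt_refresh_iat: int

    @model_validator(mode="before")
    @classmethod
    def _transform_data(cls, values):
        # DB returns jti columns as integers; the JWT layer wants strings
        if values.get("jwt_refresh_jti") is not None:
            values["jwt_refresh_jti"] = str(values["jwt_refresh_jti"])
        if values.get("spot_jwt_refresh_jti") is not None:
            values["spot_jwt_refresh_jti"] = str(values["spot_jwt_refresh_jti"])
        return values


class RefreshSpotJWTs(FullLoginJWTs):
    jwt_iat: Optional[int] = None
    jwt_refresh_jti: Optional[str] = None
    jwt_refresh_iat: Optional[int] = None


row = {"spot_jwt_iat": 1700000000, "spot_jwt_refresh_jti": 42, "spot_jwt_refresh_iat": 1700000100}
j = RefreshSpotJWTs(**row)
print(type(j.spot_jwt_refresh_jti), j.spot_jwt_refresh_jti)  # <class 'str'> 42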
|
@ -580,7 +601,7 @@ def change_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return ChangeJwt(**row)
|
||||
return FullLoginJWTs(**row)
|
||||
|
||||
|
||||
def refresh_jwt_iat_jti(user_id):
|
||||
|
|
@ -595,7 +616,7 @@ def refresh_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row.get("jwt_iat"), row.get("jwt_refresh_jti"), row.get("jwt_refresh_iat")
|
||||
return RefreshLoginJWTs(**row)
|
||||
|
||||
|
||||
def authenticate(email, password, for_change_password=False) -> dict | bool | None:
|
||||
|
|
@ -627,9 +648,12 @@ def authenticate(email, password, for_change_password=False) -> dict | bool | No
|
|||
response = {
|
||||
"jwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], iat=j_r.jwt_iat,
|
||||
aud=AUDIENCE),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'], tenant_id=r['tenantId'],
|
||||
iat=j_r.jwt_refresh_iat, aud=AUDIENCE,
|
||||
jwt_jti=j_r.jwt_refresh_jti),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'],
|
||||
tenant_id=r['tenantId'],
|
||||
iat=j_r.jwt_refresh_iat,
|
||||
aud=AUDIENCE,
|
||||
jwt_jti=j_r.jwt_refresh_jti,
|
||||
for_spot=False),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int),
|
||||
"email": email,
|
||||
"spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'],
|
||||
|
|
@ -660,13 +684,13 @@ def logout(user_id: int):
|
|||
|
||||
|
||||
def refresh(user_id: int, tenant_id: int = -1) -> dict:
|
||||
jwt_iat, jwt_r_jti, jwt_r_iat = refresh_jwt_iat_jti(user_id=user_id)
|
||||
j = refresh_jwt_iat_jti(user_id=user_id)
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=jwt_iat,
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_iat,
|
||||
aud=AUDIENCE),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=jwt_r_iat,
|
||||
aud=AUDIENCE, jwt_jti=jwt_r_jti),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (jwt_iat - jwt_r_iat)
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_refresh_iat,
|
||||
aud=AUDIENCE, jwt_jti=j.jwt_refresh_jti),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (j.jwt_iat - j.jwt_refresh_iat),
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -41,8 +41,7 @@ class ClickHouseClient:
|
|||
keys = tuple(x for x, y in results[1])
|
||||
return [dict(zip(keys, i)) for i in results[0]]
|
||||
except Exception as err:
|
||||
logger.error("--------- CH EXCEPTION -----------")
|
||||
logger.error(err)
|
||||
logger.error("--------- CH EXCEPTION -----------", exc_info=err)
|
||||
logger.error("--------- CH QUERY EXCEPTION -----------")
|
||||
logger.error(self.format(query=query, parameters=parameters)
|
||||
.replace('\n', '\\n')
|
||||
|
|
|
|||
|
|
@@ -34,7 +34,10 @@ if config("CH_COMPRESSION", cast=bool, default=True):
    def transform_result(self, original_function):
        @wraps(original_function)
        def wrapper(*args, **kwargs):
            logger.debug(self.format(query=kwargs.get("query"), parameters=kwargs.get("parameters")))
            if kwargs.get("parameters"):
                logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
            elif len(args) > 0:
                logger.debug(str.encode(args[0]))
            result = original_function(*args, **kwargs)
            if isinstance(result, clickhouse_connect.driver.query.QueryResult):
                column_names = result.column_names

@@ -108,14 +111,14 @@ def make_pool():
    try:
        CH_pool.close_all()
    except Exception as error:
        logger.error("Error while closing all connexions to CH", error)
        logger.error("Error while closing all connexions to CH", exc_info=error)
    try:
        CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
                                           max_size=config("CH_MAXCONN", cast=int, default=8))
        if CH_pool is not None:
            logger.info("Connection pool created successfully for CH")
    except ConnectionError as error:
        logger.error("Error while connecting to CH", error)
        logger.error("Error while connecting to CH", exc_info=error)
        if RETRY < RETRY_MAX:
            RETRY += 1
            logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")

@@ -146,13 +149,11 @@ class ClickHouseClient:
    def __enter__(self):
        return self.__client

    def format(self, query, *, parameters=None):
        if parameters is None:
            return query
        return query % {
            key: f"'{value}'" if isinstance(value, str) else value
            for key, value in parameters.items()
        }
    def format(self, query, parameters=None):
        if parameters:
            ctx = QueryContext(query=query, parameters=parameters)
            return ctx.final_query
        return query

    def __exit__(self, *args):
        if config('CH_POOL', cast=bool, default=True):

@@ -174,4 +175,4 @@ async def terminate():
        CH_pool.close_all()
        logger.info("Closed all connexions to CH")
    except Exception as error:
        logger.error("Error while closing all connexions to CH", error)
        logger.error("Error while closing all connexions to CH", exc_info=error)
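For context, the idea behind transform_result() is to normalize clickhouse_connect results into the list-of-dicts shape the rest of the code expects. A minimal sketch of that conversion, assuming a QueryResult exposing column_names and result_rows and ignoring the logging the real wrapper also does:

import clickhouse_connect.driver.query


def rows_as_dicts(result):
    # Hypothetical helper for illustration only, not part of this diff.
    if isinstance(result, clickhouse_connect.driver.query.QueryResult):
        return [dict(zip(result.column_names, row)) for row in result.result_rows]
    return result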
api/chalicelib/utils/contextual_validators.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import schemas
from fastapi import HTTPException, Depends
from or_dependencies import OR_context


def validate_contextual_payload(
        item: schemas.SessionsSearchPayloadSchema,
        context: schemas.CurrentContext = Depends(OR_context)
) -> schemas.SessionsSearchPayloadSchema:
    if context.project.platform == "web":
        for e in item.events:
            if e.type in [schemas.EventType.CLICK_MOBILE,
                          schemas.EventType.INPUT_MOBILE,
                          schemas.EventType.VIEW_MOBILE,
                          schemas.EventType.CUSTOM_MOBILE,
                          schemas.EventType.REQUEST_MOBILE,
                          schemas.EventType.ERROR_MOBILE,
                          schemas.EventType.SWIPE_MOBILE]:
                raise HTTPException(status_code=422,
                                    detail=f"Mobile event '{e.type}' not supported for web project")
    else:
        for e in item.events:
            if e.type in [schemas.EventType.CLICK,
                          schemas.EventType.INPUT,
                          schemas.EventType.LOCATION,
                          schemas.EventType.CUSTOM,
                          schemas.EventType.REQUEST,
                          schemas.EventType.REQUEST_DETAILS,
                          schemas.EventType.GRAPHQL,
                          schemas.EventType.STATE_ACTION,
                          schemas.EventType.ERROR,
                          schemas.EventType.TAG]:
                raise HTTPException(status_code=422,
                                    detail=f"Web event '{e.type}' not supported for mobile project")

    return item
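A minimal sketch of how this validator is meant to be wired in: it replaces a plain Body(...) parameter with a dependency, so the payload is cross-checked against the project's platform before the endpoint runs. The route below mirrors the search_sessions change further down in this diff; the router object app and the endpoint body are placeholders:

from fastapi import Depends

import schemas
from chalicelib.utils import contextual_validators
from or_dependencies import OR_context


@app.post('/{projectId}/sessions/search', tags=["sessions"])
def search_sessions(projectId: int,
                    # payload is validated against context.project.platform before this runs
                    data: schemas.SessionsSearchPayloadSchema = Depends(
                        contextual_validators.validate_contextual_payload),
                    context: schemas.CurrentContext = Depends(OR_context)):
    ...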
@@ -1,14 +0,0 @@
from chalicelib.core.sourcemaps import sourcemaps


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
@@ -334,3 +334,5 @@ def cast_session_id_to_string(data):
        for key in keys:
            data[key] = cast_session_id_to_string(data[key])
    return data
@@ -1,7 +1,26 @@
from typing import List


def get_step_size(startTimestamp, endTimestamp, density, decimal=False, factor=1000):
    if endTimestamp == 0:
        raise Exception("endTimestamp cannot be 0 in order to get step size")
    step_size = (endTimestamp // factor - startTimestamp // factor)
    if density <= 1:
        return step_size
    if decimal:
        return step_size / density
    return step_size // (density - 1)
    return step_size // density


def complete_missing_steps(rows: List[dict], start_timestamp: int, end_timestamp: int, step: int, neutral: dict,
                           time_key: str = "timestamp") -> List[dict]:
    result = []
    i = 0
    for t in range(start_timestamp, end_timestamp, step):
        if i >= len(rows) or rows[i][time_key] > t:
            neutral[time_key] = t
            result.append(neutral.copy())
        elif i < len(rows) and rows[i][time_key] == t:
            result.append(rows[i])
            i += 1
    return result
@@ -19,6 +19,16 @@ PG_CONFIG = dict(_PG_CONFIG)
if config("PG_TIMEOUT", cast=int, default=0) > 0:
    PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"

if config('PG_POOL', cast=bool, default=True):
    PG_CONFIG = {
        **PG_CONFIG,
        # Keepalive settings
        "keepalives": 1,  # Enable keepalives
        "keepalives_idle": 300,  # Seconds before sending keepalive
        "keepalives_interval": 10,  # Seconds between keepalives
        "keepalives_count": 3  # Number of keepalives before giving up
    }


class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
    def __init__(self, minconn, maxconn, *args, **kwargs):
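The keepalive keys added above are standard libpq connection parameters, so they are picked up wherever PG_CONFIG is passed to psycopg2.connect(). A rough illustration with placeholder connection details that are not taken from this diff:

import psycopg2

conn = psycopg2.connect(host="127.0.0.1", dbname="postgres", user="postgres",
                        keepalives=1, keepalives_idle=300,
                        keepalives_interval=10, keepalives_count=3)
# TCP keepalive probes start after 300s of idle time, repeat every 10s,
# and the connection is dropped after 3 missed probes.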
@@ -55,6 +65,7 @@ RETRY = 0

def make_pool():
    if not config('PG_POOL', cast=bool, default=True):
        logger.info("PG_POOL is disabled, not creating a new one")
        return
    global postgreSQL_pool
    global RETRY

@@ -62,7 +73,7 @@ def make_pool():
    try:
        postgreSQL_pool.closeall()
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error("Error while closing all connexions to PostgreSQL", error)
        logger.error("Error while closing all connexions to PostgreSQL", exc_info=error)
    try:
        postgreSQL_pool = ORThreadedConnectionPool(config("PG_MINCONN", cast=int, default=4),
                                                   config("PG_MAXCONN", cast=int, default=8),

@@ -70,10 +81,10 @@ def make_pool():
        if postgreSQL_pool is not None:
            logger.info("Connection pool created successfully")
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error("Error while connecting to PostgreSQL", error)
        logger.error("Error while connecting to PostgreSQL", exc_info=error)
        if RETRY < RETRY_MAX:
            RETRY += 1
            logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
            logger.info(f"Waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
            time.sleep(RETRY_INTERVAL)
            make_pool()
        else:
@@ -97,13 +108,17 @@ class PostgresClient:
        elif long_query:
            long_config = dict(_PG_CONFIG)
            long_config["application_name"] += "-LONG"
            long_config["options"] = f"-c statement_timeout=" \
                                     f"{config('pg_long_timeout', cast=int, default=5 * 60) * 1000}"
            if config('PG_TIMEOUT_LONG', cast=int, default=1) > 0:
                long_config["options"] = f"-c statement_timeout=" \
                                         f"{config('PG_TIMEOUT_LONG', cast=int, default=5 * 60) * 1000}"
            else:
                logger.info("Disabled timeout for long query")
            self.connection = psycopg2.connect(**long_config)
        elif not use_pool or not config('PG_POOL', cast=bool, default=True):
            single_config = dict(_PG_CONFIG)
            single_config["application_name"] += "-NOPOOL"
            single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
            if config('PG_TIMEOUT', cast=int, default=1) > 0:
                single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
            self.connection = psycopg2.connect(**single_config)
        else:
            self.connection = postgreSQL_pool.getconn()

@@ -123,7 +138,7 @@ class PostgresClient:
            if not self.use_pool or self.long_query or self.unlimited_query:
                self.connection.close()
        except Exception as error:
            logger.error("Error while committing/closing PG-connection", error)
            logger.error("Error while committing/closing PG-connection", exc_info=error)
            if str(error) == "connection already closed" \
                    and self.use_pool \
                    and not self.long_query \

@@ -150,7 +165,7 @@ class PostgresClient:
            try:
                self.connection.rollback()
            except psycopg2.InterfaceError as e:
                logger.error("!!! Error while rollbacking connection", e)
                logger.error("!!! Error while rollbacking connection", exc_info=e)
                logger.error("!!! Trying to recreate the cursor")
                self.recreate_cursor()
            raise error
@@ -161,19 +176,18 @@ class PostgresClient:
        try:
            self.connection.rollback()
        except Exception as error:
            logger.error("Error while rollbacking connection for recreation", error)
            logger.error("Error while rollbacking connection for recreation", exc_info=error)
        try:
            self.cursor.close()
        except Exception as error:
            logger.error("Error while closing cursor for recreation", error)
            logger.error("Error while closing cursor for recreation", exc_info=error)
        self.cursor = None
        return self.__enter__()


async def init():
    logger.info(f">use PG_POOL:{config('PG_POOL', default=True)}")
    if config('PG_POOL', cast=bool, default=True):
        make_pool()
    make_pool()


async def terminate():

@@ -183,4 +197,4 @@ async def terminate():
        postgreSQL_pool.closeall()
        logger.info("Closed all connexions to PostgreSQL")
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error("Error while closing all connexions to PostgreSQL", error)
        logger.error("Error while closing all connexions to PostgreSQL", exc_info=error)
@@ -4,37 +4,41 @@ import schemas

def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]):
    if isinstance(op, Enum):
        op = op.value
    return {
        schemas.SearchEventOperator.IS: "=",
        schemas.SearchEventOperator.ON: "=",
        schemas.SearchEventOperator.ON_ANY: "IN",
        schemas.SearchEventOperator.IS_NOT: "!=",
        schemas.SearchEventOperator.NOT_ON: "!=",
        schemas.SearchEventOperator.CONTAINS: "ILIKE",
        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
        schemas.SearchEventOperator.IS.value: "=",
        schemas.SearchEventOperator.ON.value: "=",
        schemas.SearchEventOperator.ON_ANY.value: "IN",
        schemas.SearchEventOperator.IS_NOT.value: "!=",
        schemas.SearchEventOperator.NOT_ON.value: "!=",
        schemas.SearchEventOperator.CONTAINS.value: "ILIKE",
        schemas.SearchEventOperator.NOT_CONTAINS.value: "NOT ILIKE",
        schemas.SearchEventOperator.STARTS_WITH.value: "ILIKE",
        schemas.SearchEventOperator.ENDS_WITH.value: "ILIKE",
        # Selector operators:
        schemas.ClickEventExtraOperator.IS: "=",
        schemas.ClickEventExtraOperator.IS_NOT: "!=",
        schemas.ClickEventExtraOperator.CONTAINS: "ILIKE",
        schemas.ClickEventExtraOperator.NOT_CONTAINS: "NOT ILIKE",
        schemas.ClickEventExtraOperator.STARTS_WITH: "ILIKE",
        schemas.ClickEventExtraOperator.ENDS_WITH: "ILIKE",
        schemas.ClickEventExtraOperator.IS.value: "=",
        schemas.ClickEventExtraOperator.IS_NOT.value: "!=",
        schemas.ClickEventExtraOperator.CONTAINS.value: "ILIKE",
        schemas.ClickEventExtraOperator.NOT_CONTAINS.value: "NOT ILIKE",
        schemas.ClickEventExtraOperator.STARTS_WITH.value: "ILIKE",
        schemas.ClickEventExtraOperator.ENDS_WITH.value: "ILIKE",

        schemas.MathOperator.GREATER: ">",
        schemas.MathOperator.GREATER_EQ: ">=",
        schemas.MathOperator.LESS: "<",
        schemas.MathOperator.LESS_EQ: "<=",
        schemas.MathOperator.GREATER.value: ">",
        schemas.MathOperator.GREATER_EQ.value: ">=",
        schemas.MathOperator.LESS.value: "<",
        schemas.MathOperator.LESS_EQ.value: "<=",
    }.get(op, "=")


def is_negation_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_NOT,
                  schemas.SearchEventOperator.NOT_ON,
                  schemas.SearchEventOperator.NOT_CONTAINS,
                  schemas.ClickEventExtraOperator.IS_NOT,
                  schemas.ClickEventExtraOperator.NOT_CONTAINS]
    if isinstance(op, Enum):
        op = op.value
    return op in [schemas.SearchEventOperator.IS_NOT.value,
                  schemas.SearchEventOperator.NOT_ON.value,
                  schemas.SearchEventOperator.NOT_CONTAINS.value,
                  schemas.ClickEventExtraOperator.IS_NOT.value,
                  schemas.ClickEventExtraOperator.NOT_CONTAINS.value]


def reverse_sql_operator(op):

@@ -64,3 +68,12 @@ def isAny_opreator(op: schemas.SearchEventOperator):

def isUndefined_operator(op: schemas.SearchEventOperator):
    return op in [schemas.SearchEventOperator.IS_UNDEFINED]


def single_value(values):
    if values is not None and isinstance(values, list):
        for i, v in enumerate(values):
            if isinstance(v, Enum):
                values[i] = v.value
    return values
@@ -1,4 +1,5 @@
#!/bin/sh
export TZ=UTC
export ASSIST_KEY=ignore
export CH_POOL=false
uvicorn app:app --host 0.0.0.0 --port 8888 --log-level ${S_LOGLEVEL:-warning}
@@ -63,4 +63,9 @@ sessions_region=us-east-1
SITE_URL=http://127.0.0.1:3333
sourcemaps_bucket=
sourcemaps_reader=http://127.0.0.1:3000/sourcemaps
TZ=UTC
TZ=UTC
EXP_CH_DRIVER=true
EXP_AUTOCOMPLETE=true
EXP_ALERTS=true
EXP_ERRORS_SEARCH=true
EXP_METRICS=true
|
@ -1,591 +0,0 @@
|
|||
-- -- Original Q3
|
||||
-- WITH ranked_events AS (SELECT *
|
||||
-- FROM ranked_events_1736344377403),
|
||||
-- n1 AS (SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM ranked_events
|
||||
-- WHERE event_number_in_session = 1
|
||||
-- AND isNotNull(next_value)
|
||||
-- GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n2 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n1
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n1.next_value = re.e_value AND n1.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 2
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n3 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n2
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n2.next_value = re.e_value AND n2.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 3
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n4 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n3
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n3.next_value = re.e_value AND n3.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 4
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n5 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n4
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n4.next_value = re.e_value AND n4.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 5
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8)
|
||||
-- SELECT *
|
||||
-- FROM (SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n1
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n2
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n3
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n4
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n5) AS chart_steps
|
||||
-- ORDER BY event_number_in_session;
|
||||
|
||||
-- Q1
|
||||
-- CREATE TEMPORARY TABLE pre_ranked_events_1736344377403 AS
|
||||
CREATE TABLE pre_ranked_events_1736344377403 ENGINE = Memory AS
|
||||
(WITH initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
|
||||
FROM experimental.events AS events
|
||||
WHERE ((event_type = 'LOCATION' AND (url_path = '/en/deployment/')))
|
||||
AND events.project_id = toUInt16(65)
|
||||
AND events.datetime >= toDateTime(1735599600000 / 1000)
|
||||
AND events.datetime < toDateTime(1736290799999 / 1000)
|
||||
GROUP BY 1),
|
||||
pre_ranked_events AS (SELECT *
|
||||
FROM (SELECT session_id,
|
||||
event_type,
|
||||
datetime,
|
||||
url_path AS e_value,
|
||||
row_number() OVER (PARTITION BY session_id
|
||||
ORDER BY datetime ,
|
||||
message_id ) AS event_number_in_session
|
||||
FROM experimental.events AS events
|
||||
INNER JOIN initial_event ON (events.session_id = initial_event.session_id)
|
||||
WHERE events.project_id = toUInt16(65)
|
||||
AND events.datetime >= toDateTime(1735599600000 / 1000)
|
||||
AND events.datetime < toDateTime(1736290799999 / 1000)
|
||||
AND (events.event_type = 'LOCATION')
|
||||
AND events.datetime >= initial_event.start_event_timestamp
|
||||
) AS full_ranked_events
|
||||
WHERE event_number_in_session <= 5)
|
||||
SELECT *
|
||||
FROM pre_ranked_events);
|
||||
;
|
||||
|
||||
SELECT *
|
||||
FROM pre_ranked_events_1736344377403
|
||||
WHERE event_number_in_session < 3;
|
||||
|
||||
|
||||
|
||||
-- ---------Q2-----------
|
||||
-- CREATE TEMPORARY TABLE ranked_events_1736344377403 AS
|
||||
DROP TABLE ranked_events_1736344377403;
|
||||
CREATE TABLE ranked_events_1736344377403 ENGINE = Memory AS
|
||||
(WITH pre_ranked_events AS (SELECT *
|
||||
FROM pre_ranked_events_1736344377403),
|
||||
start_points AS (SELECT DISTINCT session_id
|
||||
FROM pre_ranked_events
|
||||
WHERE ((event_type = 'LOCATION' AND (e_value = '/en/deployment/')))
|
||||
AND pre_ranked_events.event_number_in_session = 1),
|
||||
ranked_events AS (SELECT pre_ranked_events.*,
|
||||
leadInFrame(e_value)
|
||||
OVER (PARTITION BY session_id ORDER BY datetime
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
|
||||
leadInFrame(toNullable(event_type))
|
||||
OVER (PARTITION BY session_id ORDER BY datetime
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
|
||||
FROM start_points
|
||||
INNER JOIN pre_ranked_events USING (session_id))
|
||||
SELECT *
|
||||
FROM ranked_events);
|
||||
|
||||
|
||||
-- ranked events
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 2
|
||||
-- AND e_value='/en/deployment/deploy-docker/'
|
||||
-- AND next_value NOT IN ('/en/deployment/','/en/plugins/','/en/using-or/')
|
||||
-- AND e_value NOT IN ('/en/deployment/deploy-docker/','/en/getting-started/','/en/deployment/deploy-ubuntu/')
|
||||
AND isNotNull(next_value)
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY event_number_in_session, sessions_count DESC;
|
||||
|
||||
|
||||
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY event_number_in_session, sessions_count DESC;
|
||||
|
||||
SELECT COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 2
|
||||
AND isNull(next_value)
|
||||
;
|
||||
|
||||
-- ---------Q3 MORE -----------
|
||||
WITH ranked_events AS (SELECT *
|
||||
FROM ranked_events_1736344377403),
|
||||
n1 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n2 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 2
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n3 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 3
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
drop_n AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNull(n1.next_type)
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNull(n2.next_type)),
|
||||
-- TODO: make this as top_steps, where every step will go to next as top/others
|
||||
top_n1 AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNotNull(next_type)
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 3),
|
||||
top_n2 AS (-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE (event_type, e_value) IN (SELECT event_type,
|
||||
e_value
|
||||
FROM n2
|
||||
WHERE isNotNull(next_type)
|
||||
GROUP BY event_type, e_value
|
||||
ORDER BY SUM(sessions_count) DESC
|
||||
LIMIT 3)
|
||||
ORDER BY sessions_count DESC),
|
||||
top_n AS (SELECT *
|
||||
FROM top_n1
|
||||
UNION ALL
|
||||
SELECT *
|
||||
FROM top_n2),
|
||||
u_top_n AS (SELECT DISTINCT event_number_in_session,
|
||||
event_type,
|
||||
e_value
|
||||
FROM top_n),
|
||||
others_n AS (
|
||||
-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNotNull(next_type)
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 1000000 OFFSET 3
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNotNull(next_type)
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 1000000 OFFSET 3)
|
||||
SELECT *
|
||||
FROM (
|
||||
-- Top
|
||||
SELECT *
|
||||
FROM top_n
|
||||
-- UNION ALL
|
||||
-- -- Others
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- 'OTHER' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(sessions_count)
|
||||
-- FROM others_n
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
-- UNION ALL
|
||||
-- -- Top go to Drop
|
||||
-- SELECT drop_n.event_number_in_session,
|
||||
-- drop_n.event_type,
|
||||
-- drop_n.e_value,
|
||||
-- drop_n.next_type,
|
||||
-- drop_n.next_value,
|
||||
-- drop_n.sessions_count
|
||||
-- FROM drop_n
|
||||
-- INNER JOIN u_top_n ON (drop_n.event_number_in_session = u_top_n.event_number_in_session
|
||||
-- AND drop_n.event_type = u_top_n.event_type
|
||||
-- AND drop_n.e_value = u_top_n.e_value)
|
||||
-- ORDER BY drop_n.event_number_in_session
|
||||
-- -- -- UNION ALL
|
||||
-- -- -- Top go to Others
|
||||
-- SELECT top_n.event_number_in_session,
|
||||
-- top_n.event_type,
|
||||
-- top_n.e_value,
|
||||
-- 'OTHER' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(top_n.sessions_count) AS sessions_count
|
||||
-- FROM top_n
|
||||
-- LEFT JOIN others_n ON (others_n.event_number_in_session = (top_n.event_number_in_session + 1)
|
||||
-- AND top_n.next_type = others_n.event_type
|
||||
-- AND top_n.next_value = others_n.e_value)
|
||||
-- WHERE others_n.event_number_in_session IS NULL
|
||||
-- AND top_n.next_type IS NOT NULL
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
-- UNION ALL
|
||||
-- -- Others got to Top
|
||||
-- SELECT others_n.event_number_in_session,
|
||||
-- 'OTHER' AS event_type,
|
||||
-- NULL AS e_value,
|
||||
-- others_n.s_next_type AS next_type,
|
||||
-- others_n.s_next_value AS next_value,
|
||||
-- SUM(sessions_count) AS sessions_count
|
||||
-- FROM others_n
|
||||
-- INNER JOIN top_n ON (others_n.event_number_in_session = top_n.event_number_in_session + 1 AND
|
||||
-- others_n.s_next_type = top_n.event_type AND
|
||||
-- others_n.s_next_value = top_n.event_type)
|
||||
-- GROUP BY others_n.event_number_in_session, next_type, next_value
|
||||
-- UNION ALL
|
||||
-- -- TODO: find if this works or not
|
||||
-- -- Others got to Others
|
||||
-- SELECT others_n.event_number_in_session,
|
||||
-- 'OTHER' AS event_type,
|
||||
-- NULL AS e_value,
|
||||
-- 'OTHERS' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(sessions_count) AS sessions_count
|
||||
-- FROM others_n
|
||||
-- LEFT JOIN u_top_n ON ((others_n.event_number_in_session + 1) = u_top_n.event_number_in_session
|
||||
-- AND others_n.s_next_type = u_top_n.event_type
|
||||
-- AND others_n.s_next_value = u_top_n.e_value)
|
||||
-- WHERE u_top_n.event_number_in_session IS NULL
|
||||
-- GROUP BY others_n.event_number_in_session
|
||||
)
|
||||
ORDER BY event_number_in_session;
|
||||
|
||||
|
||||
-- ---------Q3 TOP ON VALUE ONLY -----------
|
||||
WITH ranked_events AS (SELECT *
|
||||
FROM ranked_events_1736344377403),
|
||||
n1 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n2 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 2
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n3 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 3
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
|
||||
drop_n AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNull(n1.next_type)
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNull(n2.next_type)),
|
||||
top_n AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n1
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
LIMIT 1
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n2
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 3
|
||||
UNION ALL
|
||||
-- STEP 3
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n3
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 3),
|
||||
top_n_with_next AS (SELECT n1.*
|
||||
FROM n1
|
||||
UNION ALL
|
||||
SELECT n2.*
|
||||
FROM n2
|
||||
INNER JOIN top_n ON (n2.event_number_in_session = top_n.event_number_in_session
|
||||
AND n2.event_type = top_n.event_type
|
||||
AND n2.e_value = top_n.e_value)),
|
||||
others_n AS (
|
||||
-- STEP 2
|
||||
SELECT n2.*
|
||||
FROM n2
|
||||
WHERE (n2.event_number_in_session, n2.event_type, n2.e_value) NOT IN
|
||||
(SELECT event_number_in_session, event_type, e_value
|
||||
FROM top_n
|
||||
WHERE top_n.event_number_in_session = 2)
|
||||
UNION ALL
|
||||
-- STEP 3
|
||||
SELECT n3.*
|
||||
FROM n3
|
||||
WHERE (n3.event_number_in_session, n3.event_type, n3.e_value) NOT IN
|
||||
(SELECT event_number_in_session, event_type, e_value
|
||||
FROM top_n
|
||||
WHERE top_n.event_number_in_session = 3))
|
||||
SELECT *
|
||||
FROM (
|
||||
-- SELECT sum(top_n_with_next.sessions_count)
|
||||
-- FROM top_n_with_next
|
||||
-- WHERE event_number_in_session = 1
|
||||
-- -- AND isNotNull(next_value)
|
||||
-- AND (next_type, next_value) IN
|
||||
-- (SELECT others_n.event_type, others_n.e_value FROM others_n WHERE others_n.event_number_in_session = 2)
|
||||
-- -- SELECT * FROM others_n
|
||||
-- -- SELECT * FROM n2
|
||||
-- SELECT *
|
||||
-- FROM top_n
|
||||
-- );
|
||||
-- Top to Top: valid
|
||||
SELECT top_n_with_next.*
|
||||
FROM top_n_with_next
|
||||
INNER JOIN top_n
|
||||
ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
|
||||
AND top_n_with_next.next_type = top_n.event_type
|
||||
AND top_n_with_next.next_value = top_n.e_value)
|
||||
UNION ALL
|
||||
-- Top to Others: valid
|
||||
SELECT top_n_with_next.event_number_in_session,
|
||||
top_n_with_next.event_type,
|
||||
top_n_with_next.e_value,
|
||||
'OTHER' AS next_type,
|
||||
NULL AS next_value,
|
||||
SUM(top_n_with_next.sessions_count) AS sessions_count
|
||||
FROM top_n_with_next
|
||||
WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
|
||||
(SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
|
||||
GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value
|
||||
UNION ALL
|
||||
-- Top go to Drop: valid
|
||||
SELECT drop_n.event_number_in_session,
|
||||
drop_n.event_type,
|
||||
drop_n.e_value,
|
||||
drop_n.next_type,
|
||||
drop_n.next_value,
|
||||
drop_n.sessions_count
|
||||
FROM drop_n
|
||||
INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
|
||||
AND drop_n.event_type = top_n.event_type
|
||||
AND drop_n.e_value = top_n.e_value)
|
||||
ORDER BY drop_n.event_number_in_session
|
||||
UNION ALL
|
||||
-- Others got to Drop: valid
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
NULL AS e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
SUM(others_n.sessions_count) AS sessions_count
|
||||
FROM others_n
|
||||
WHERE isNull(others_n.next_type)
|
||||
AND others_n.event_number_in_session < 3
|
||||
GROUP BY others_n.event_number_in_session, next_type, next_value
|
||||
UNION ALL
|
||||
-- Others got to Top:valid
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
NULL AS e_value,
|
||||
others_n.next_type,
|
||||
others_n.next_value,
|
||||
SUM(others_n.sessions_count) AS sessions_count
|
||||
FROM others_n
|
||||
WHERE isNotNull(others_n.next_type)
|
||||
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
|
||||
(SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
|
||||
GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
|
||||
UNION ALL
|
||||
-- Others got to Others
|
||||
SELECT others_n.event_number_in_session,
|
||||
'OTHER' AS event_type,
|
||||
NULL AS e_value,
|
||||
'OTHERS' AS next_type,
|
||||
NULL AS next_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM others_n
|
||||
WHERE isNotNull(others_n.next_type)
|
||||
AND others_n.event_number_in_session < 3
|
||||
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
|
||||
(SELECT event_number_in_session, event_type, e_value FROM top_n)
|
||||
GROUP BY others_n.event_number_in_session)
|
||||
ORDER BY event_number_in_session, sessions_count
|
||||
DESC;
|
||||
|
||||
|
||||
|
|
@ -1,19 +1,17 @@
|
|||
urllib3==2.2.3
|
||||
urllib3==2.3.0
|
||||
requests==2.32.3
|
||||
boto3==1.35.86
|
||||
boto3==1.36.12
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
psycopg[pool,binary]==3.2.4
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.11
|
||||
elasticsearch==8.17.0
|
||||
clickhouse-connect==0.8.15
|
||||
elasticsearch==8.17.1
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
cachetools==5.5.1
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.6
|
||||
fastapi==0.115.8
|
||||
uvicorn[standard]==0.34.0
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.10.4
|
||||
pydantic[email]==2.10.6
|
||||
apscheduler==3.11.0
|
||||
|
|
@@ -1,21 +1,19 @@
urllib3==2.2.3
urllib3==2.3.0
requests==2.32.3
boto3==1.35.86
boto3==1.36.12
pyjwt==2.10.1
psycopg2-binary==2.9.10
psycopg[pool,binary]==3.2.3
psycopg[pool,binary]==3.2.4
clickhouse-driver[lz4]==0.2.9
clickhouse-connect==0.8.11
elasticsearch==8.17.0
clickhouse-connect==0.8.15
elasticsearch==8.17.1
jira==3.8.0
cachetools==5.5.0
cachetools==5.5.1

fastapi==0.115.6
fastapi==0.115.8
uvicorn[standard]==0.34.0
python-decouple==3.8
pydantic[email]==2.10.4
pydantic[email]==2.10.6
apscheduler==3.11.0

redis==5.2.1
@@ -4,8 +4,9 @@ from decouple import config
from fastapi import Depends, Body, BackgroundTasks

import schemas
from chalicelib.core import sourcemaps, events, projects, issues, metadata, reset_password, log_tools, \
from chalicelib.core import events, projects, issues, metadata, reset_password, log_tools, \
    announcements, weekly_report, assist, mobile, tenants, boarding, notifications, webhook, users, saved_search, tags
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.core.metrics import custom_metrics
from chalicelib.core.alerts import alerts
from chalicelib.core.autocomplete import autocomplete

@@ -32,8 +33,7 @@ def events_search(projectId: int, q: Optional[str] = None,
                  context: schemas.CurrentContext = Depends(OR_context)):
    if type and (not q or len(q) == 0) \
            and (autocomplete.is_top_supported(type)):
        # return autocomplete.get_top_values(project_id=projectId, event_type=type, event_key=key)
        return autocomplete.get_top_values(projectId, type, event_key=key)
        return autocomplete.get_top_values(project_id=projectId, event_type=type, event_key=key)
    elif not q or len(q) == 0:
        return {"data": []}
@@ -79,7 +79,7 @@ def notify(projectId: int, integration: str, webhookId: int, source: str, source
    args = {"tenant_id": context.tenant_id,
            "user": context.email, "comment": comment, "project_id": projectId,
            "id": webhookId,
            "integration_id": webhookId,
            "project_name": context.project.name}
    if integration == schemas.WebhookType.SLACK:
        if source == "sessions":
@@ -7,16 +7,17 @@ from fastapi import HTTPException, status
from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response

import schemas
from chalicelib.core import scope
from chalicelib.core import assist, signup, feature_flags
from chalicelib.core.metrics import heatmaps
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
    sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.core import scope
from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.metrics import heatmaps
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
    sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.utils import captcha, smtp
from chalicelib.utils import contextual_validators
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context, OR_role

@@ -26,7 +27,10 @@ from routers.subs import spot
logger = logging.getLogger(__name__)
public_app, app, app_apikey = get_routers()

COOKIE_PATH = "/api/refresh"
if config("LOCAL_DEV", cast=bool, default=False):
    COOKIE_PATH = "/refresh"
else:
    COOKIE_PATH = "/api/refresh"


@public_app.get('/signup', tags=['signup'])
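The branch above only changes which URL path the refresh cookie is scoped to (no /api prefix when running the API directly in local dev). Purely as an illustration of where COOKIE_PATH ends up, with a placeholder cookie name and attributes that are not taken from this diff:

from fastapi import Response


def set_refresh_cookie(response: Response, token: str, max_age: int) -> None:
    # Scoped to COOKIE_PATH, so the browser only sends the cookie to the refresh endpoint.
    response.set_cookie(key="refreshToken", value=token, path=COOKIE_PATH,
                        max_age=max_age, httponly=True, secure=True)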
@@ -252,17 +256,19 @@ def get_projects(context: schemas.CurrentContext = Depends(OR_context)):

@app.post('/{projectId}/sessions/search', tags=["sessions"])
def search_sessions(projectId: int, data: schemas.SessionsSearchPayloadSchema = Body(...),
def search_sessions(projectId: int, data: schemas.SessionsSearchPayloadSchema = \
        Depends(contextual_validators.validate_contextual_payload),
                    context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_search.search_sessions(data=data, project_id=projectId, user_id=context.user_id,
    data = sessions_search.search_sessions(data=data, project=context.project, user_id=context.user_id,
                                           platform=context.project.platform)
    return {'data': data}


@app.post('/{projectId}/sessions/search/ids', tags=["sessions"])
def session_ids_search(projectId: int, data: schemas.SessionsSearchPayloadSchema = Body(...),
def session_ids_search(projectId: int, data: schemas.SessionsSearchPayloadSchema = \
        Depends(contextual_validators.validate_contextual_payload),
                       context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_search.search_sessions(data=data, project_id=projectId, user_id=context.user_id, ids_only=True,
    data = sessions_search.search_sessions(data=data, project=context.project, user_id=context.user_id, ids_only=True,
                                           platform=context.project.platform)
    return {'data': data}
|
@ -9,172 +9,244 @@ from routers.base import get_routers
|
|||
public_app, app, app_apikey = get_routers()
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards', tags=["dashboard"])
|
||||
@app.post("/{projectId}/dashboards", tags=["dashboard"])
|
||||
def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.create_dashboard(project_id=projectId, user_id=context.user_id, data=data)
|
||||
return dashboards.create_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, data=data
|
||||
)
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards", tags=["dashboard"])
|
||||
def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)}
|
||||
return {
|
||||
"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = dashboards.get_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
|
||||
data = dashboards.get_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["dashboard not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.put('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.put("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.update_dashboard(project_id=projectId, user_id=context.user_id,
|
||||
dashboard_id=dashboardId, data=data)}
|
||||
return {
|
||||
"data": dashboards.update_dashboard(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.delete('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.delete("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def delete_dashboard(projectId: int, dashboardId: int, _=Body(None),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.delete_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
|
||||
return dashboards.delete_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards/{dashboardId}/pin', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards/{dashboardId}/pin", tags=["dashboard"])
|
||||
def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.pin_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)}
|
||||
return {
|
||||
"data": dashboards.pin_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards/{dashboardId}/cards', tags=["cards"])
|
||||
def add_card_to_dashboard(projectId: int, dashboardId: int,
|
||||
data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
|
||||
@app.post("/{projectId}/dashboards/{dashboardId}/cards", tags=["cards"])
|
||||
def add_card_to_dashboard(projectId: int, dashboardId: int, data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.add_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
data=data)}
|
||||
return {
|
||||
"data": dashboards.add_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
|
||||
@app.post("/{projectId}/dashboards/{dashboardId}/metrics", tags=["dashboard"])
|
||||
# @app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
|
||||
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int,
|
||||
data: schemas.CardSchema = Body(...),
|
||||
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.create_metric_add_widget(project=context.project, user_id=context.user_id,
|
||||
dashboard_id=dashboardId, data=data)}
|
||||
return {
|
||||
"data": dashboards.create_metric_add_widget(
|
||||
project=context.project,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.put('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
|
||||
@app.put("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
|
||||
def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
|
||||
data: schemas.UpdateWidgetPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.update_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
widget_id=widgetId, data=data)
|
||||
return dashboards.update_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
widget_id=widgetId,
|
||||
data=data,
|
||||
)
|
||||
|
||||
|
||||
@app.delete('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
|
||||
@app.delete("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
|
||||
def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int, _=Body(None),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.remove_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
widget_id=widgetId)
|
||||
return dashboards.remove_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
widget_id=widgetId,
|
||||
)
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try", tags=["cards"])
|
||||
def try_card(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_chart(project=context.project, data=data, user_id=context.user_id)}
|
||||
return {
|
||||
"data": custom_metrics.get_chart(
|
||||
project=context.project, data=data, user_id=context.user_id
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try/sessions', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try/sessions", tags=["cards"])
|
||||
def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.get_sessions(project_id=projectId, user_id=context.user_id, data=data)
|
||||
data = custom_metrics.get_sessions(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try/issues', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try/issues", tags=["cards"])
|
||||
def try_card_issues(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_issues(project=context.project, user_id=context.user_id, data=data)}
|
||||
return {
|
||||
"data": custom_metrics.get_issues(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/cards', tags=["cards"])
|
||||
@app.get("/{projectId}/cards", tags=["cards"])
|
||||
def get_cards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}
|
||||
return {
|
||||
"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards', tags=["cards"])
|
||||
@app.post("/{projectId}/cards", tags=["cards"])
|
||||
def create_card(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return custom_metrics.create_card(project=context.project, user_id=context.user_id, data=data)
|
||||
return custom_metrics.create_card(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/search', tags=["cards"])
|
||||
def search_cards(projectId: int, data: schemas.SearchCardsSchema = Body(...),
|
||||
@app.post("/{projectId}/cards/search", tags=["cards"])
|
||||
def search_cards(projectId: int, data: schemas.MetricSearchSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.search_all(project_id=projectId, user_id=context.user_id, data=data)}
|
||||
return {
|
||||
"data": custom_metrics.search_metrics(
|
||||
project_id=projectId, user_id=context.user_id, data=data
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/cards/{metric_id}', tags=["cards"])
|
||||
@app.get("/{projectId}/cards/{metric_id}", tags=["cards"])
|
||||
def get_card(projectId: int, metric_id: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if metric_id.isnumeric():
|
||||
metric_id = int(metric_id)
|
||||
else:
|
||||
return {"errors": ["invalid card_id"]}
|
||||
data = custom_metrics.get_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
|
||||
data = custom_metrics.get_card(
|
||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["card not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}/sessions', tags=["cards"])
|
||||
def get_card_sessions(projectId: int, metric_id: int,
|
||||
data: schemas.CardSessionsSchema = Body(...),
|
||||
@app.post("/{projectId}/cards/{metric_id}/sessions", tags=["cards"])
|
||||
def get_card_sessions(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.get_sessions_by_card_id(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
|
||||
data=data)
|
||||
    data = custom_metrics.get_sessions_by_card_id(
        project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
    )
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/cards/{metric_id}/issues/{issueId}/sessions', tags=["dashboard"])
@app.post("/{projectId}/cards/{metric_id}/issues/{issueId}/sessions", tags=["dashboard"])
def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
                                     data: schemas.CardSessionsSchema = Body(...),
                                     context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.get_funnel_sessions_by_issue(project_id=projectId, user_id=context.user_id,
                                                       metric_id=metric_id, issue_id=issueId, data=data)
    data = custom_metrics.get_funnel_sessions_by_issue(
        project_id=projectId,
        user_id=context.user_id,
        metric_id=metric_id,
        issue_id=issueId,
        data=data,
    )
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/cards/{metric_id}/chart', tags=["card"])
@app.post("/{projectId}/cards/{metric_id}/chart", tags=["card"])
def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
                   context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.make_chart_from_card(project=context.project, user_id=context.user_id, metric_id=metric_id,
                                               data=data)
    data = custom_metrics.make_chart_from_card(
        project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
    )
    return {"data": data}


@app.post('/{projectId}/cards/{metric_id}', tags=["dashboard"])
@app.post("/{projectId}/cards/{metric_id}", tags=["dashboard"])
def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(...),
                context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.update_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
    data = custom_metrics.update_card(
        project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data
    )
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/cards/{metric_id}/status', tags=["dashboard"])
def update_card_state(projectId: int, metric_id: int,
                      data: schemas.UpdateCardStatusSchema = Body(...),
@app.post("/{projectId}/cards/{metric_id}/status", tags=["dashboard"])
def update_card_state(projectId: int, metric_id: int, data: schemas.UpdateCardStatusSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": custom_metrics.change_state(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
                                            status=data.active)}
        "data": custom_metrics.change_state(
            project_id=projectId,
            user_id=context.user_id,
            metric_id=metric_id,
            status=data.active,
        )
    }


@app.delete('/{projectId}/cards/{metric_id}', tags=["dashboard"])
def delete_card(projectId: int, metric_id: int, _=Body(None),
                context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.delete_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}
@app.delete("/{projectId}/cards/{metric_id}", tags=["dashboard"])
def delete_card(projectId: int, metric_id: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": custom_metrics.delete_card(
            project_id=projectId, user_id=context.user_id, metric_id=metric_id
        )
    }
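The card endpoints above all follow the same pattern: projectId and metric_id come from the path, the JSON body is validated by the schema named in the signature, and the calling user is resolved through the OR_context dependency. A minimal client sketch for the chart endpoint follows; the base URL, auth header, IDs, and body fields are placeholders, not values taken from this diff.

# --- illustrative example, not part of the diff ---
import requests

BASE_URL = "https://openreplay.example.com/api"   # hypothetical deployment
PROJECT_ID, METRIC_ID = 1, 42                     # hypothetical IDs

# POST /{projectId}/cards/{metric_id}/chart renders the chart for an existing card;
# the body here is a heavily abridged stand-in for a CardSessionsSchema payload.
resp = requests.post(
    f"{BASE_URL}/{PROJECT_ID}/cards/{METRIC_ID}/chart",
    headers={"Authorization": "Bearer <token>"},  # placeholder auth header
    json={"startTimestamp": 1700000000000, "endTimestamp": 1700086400000},
)
print(resp.json().get("data"))
# --- end example ---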
@@ -1,3 +1,4 @@
from decouple import config
from fastapi import Depends
from starlette.responses import JSONResponse, Response


@@ -8,7 +9,10 @@ from routers.base import get_routers

public_app, app, app_apikey = get_routers(prefix="/spot", tags=["spot"])

COOKIE_PATH = "/api/spot/refresh"
if config("LOCAL_DEV", cast=bool, default=False):
    COOKIE_PATH = "/spot/refresh"
else:
    COOKIE_PATH = "/api/spot/refresh"


@app.get('/logout')
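The LOCAL_DEV switch only changes the path the refresh cookie is scoped to: in local development the service is reached without the /api prefix that a reverse proxy adds in a regular deployment. A minimal sketch of how such a COOKIE_PATH is typically consumed; the endpoint, cookie name, and token handling are assumptions, not shown in this diff.

# --- illustrative example, not part of the diff ---
def issue_refresh_cookie(refresh_token: str, max_age_s: int) -> JSONResponse:
    # Scope the HttpOnly refresh cookie to the refresh route so the browser
    # only sends it there; COOKIE_PATH is the value computed above.
    response = JSONResponse(content={"ok": True})
    response.set_cookie(
        key="spotRefreshToken",   # hypothetical cookie name
        value=refresh_token,
        path=COOKIE_PATH,
        httponly=True,
        secure=True,
        max_age=max_age_s,
    )
    return response
# --- end example ---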
@@ -1,7 +1,8 @@
from fastapi import Depends, Body

import schemas
from chalicelib.core import sessions, events, jobs, projects
from chalicelib.core import events, jobs, projects
from chalicelib.core.sessions import sessions
from or_dependencies import OR_context
from routers.base import get_routers


@@ -537,6 +537,13 @@ class RequestGraphqlFilterSchema(BaseModel):
    value: List[Union[int, str]] = Field(...)
    operator: Union[SearchEventOperator, MathOperator] = Field(...)

    @model_validator(mode="before")
    @classmethod
    def _transform_data(cls, values):
        if values.get("type") in [FetchFilterType.FETCH_DURATION, FetchFilterType.FETCH_STATUS_CODE]:
            values["value"] = [int(v) for v in values["value"] if v is not None and str(v).isnumeric()]
        return values
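The new "before" validator normalizes the incoming value list for the two numeric fetch filters, so string digits sent by the UI are compared as integers and null or non-numeric entries are dropped. A small sketch of the coercion; the payload fragment is hypothetical.

# --- illustrative example, not part of the diff ---
raw = {"type": "fetchStatusCode", "value": ["200", 404, None, "n/a"]}  # hypothetical request fragment
cleaned = [int(v) for v in raw["value"] if v is not None and str(v).isnumeric()]
assert cleaned == [200, 404]
# --- end example ---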

class SessionSearchEventSchema2(BaseModel):
    is_event: Literal[True] = True


@@ -844,18 +851,21 @@ class MetricTimeseriesViewType(str, Enum):
    LINE_CHART = "lineChart"
    AREA_CHART = "areaChart"
    BAR_CHART = "barChart"
    PIE_CHART = "pieChart"
    PROGRESS_CHART = "progressChart"
    TABLE_CHART = "table"
    PIE_CHART = "pieChart"
    METRIC_CHART = "metric"
    TABLE_CHART = "table"


class MetricTableViewType(str, Enum):
    TABLE = "table"
    TABLE_CHART = "table"


class MetricOtherViewType(str, Enum):
    OTHER_CHART = "chart"
    COLUMN_CHART = "columnChart"
    METRIC_CHART = "metric"
    TABLE_CHART = "table"
    LIST_CHART = "list"


@@ -863,37 +873,13 @@ class MetricType(str, Enum):
    TIMESERIES = "timeseries"
    TABLE = "table"
    FUNNEL = "funnel"
    ERRORS = "errors"
    PERFORMANCE = "performance"
    RESOURCES = "resources"
    WEB_VITAL = "webVitals"
    PATH_ANALYSIS = "pathAnalysis"
    RETENTION = "retention"
    STICKINESS = "stickiness"
    HEAT_MAP = "heatMap"


class MetricOfErrors(str, Enum):
    DOMAINS_ERRORS_4XX = "domainsErrors4xx"
    DOMAINS_ERRORS_5XX = "domainsErrors5xx"
    ERRORS_PER_DOMAINS = "errorsPerDomains"
    ERRORS_PER_TYPE = "errorsPerType"
    IMPACTED_SESSIONS_BY_JS_ERRORS = "impactedSessionsByJsErrors"
    RESOURCES_BY_PARTY = "resourcesByParty"


class MetricOfWebVitals(str, Enum):
    AVG_SESSION_DURATION = "avgSessionDuration"
    AVG_USED_JS_HEAP_SIZE = "avgUsedJsHeapSize"
    AVG_VISITED_PAGES = "avgVisitedPages"
    COUNT_REQUESTS = "countRequests"
    COUNT_SESSIONS = "countSessions"
    COUNT_USERS = "userCount"
    SPEED_LOCATION = "speedLocation"


class MetricOfTable(str, Enum):
    USER_OS = FilterType.USER_OS.value
    USER_BROWSER = FilterType.USER_BROWSER.value
    USER_DEVICE = FilterType.USER_DEVICE.value
    USER_COUNTRY = FilterType.USER_COUNTRY.value
@@ -974,36 +960,6 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):

        return self

    # We don't need this as the UI is expecting filters to override the full series' filters
    # @model_validator(mode="after")
    # def __merge_out_filters_with_series(self):
    #     for f in self.filters:
    #         for s in self.series:
    #             found = False
    #
    #             if f.is_event:
    #                 sub = s.filter.events
    #             else:
    #                 sub = s.filter.filters
    #
    #             for e in sub:
    #                 if f.type == e.type and f.operator == e.operator:
    #                     found = True
    #                     if f.is_event:
    #                         # If extra event: append value
    #                         for v in f.value:
    #                             if v not in e.value:
    #                                 e.value.append(v)
    #                     else:
    #                         # If extra filter: override value
    #                         e.value = f.value
    #             if not found:
    #                 sub.append(f)
    #
    #     self.filters = []
    #
    #     return self

    # UI is expecting filters to override the full series' filters
    @model_validator(mode="after")
    def __override_series_filters_with_outer_filters(self):
@@ -1043,12 +999,6 @@ class __CardSchema(CardSessionsSchema):
    # This is used to specify the number of top values for PathAnalysis
    rows: int = Field(default=3, ge=1, le=10)

    @computed_field
    @property
    def is_predefined(self) -> bool:
        return self.metric_type in [MetricType.ERRORS, MetricType.PERFORMANCE,
                                    MetricType.RESOURCES, MetricType.WEB_VITAL]


class CardTimeSeries(__CardSchema):
    metric_type: Literal[MetricType.TIMESERIES]

@@ -1080,6 +1030,16 @@ class CardTable(__CardSchema):
            values["metricValue"] = []
        return values

    @model_validator(mode="after")
    def __enforce_AND_operator(self):
        self.metric_of = MetricOfTable(self.metric_of)
        if self.metric_of in (MetricOfTable.VISITED_URL, MetricOfTable.FETCH, \
                              MetricOfTable.VISITED_URL.value, MetricOfTable.FETCH.value):
            for s in self.series:
                if s.filter is not None:
                    s.filter.events_order = SearchEventOrder.AND
        return self

    @model_validator(mode="after")
    def __transform(self):
        self.metric_of = MetricOfTable(self.metric_of)

@@ -1107,7 +1067,7 @@ class CardFunnel(__CardSchema):
    def __enforce_default(cls, values):
        if values.get("metricOf") and not MetricOfFunnels.has_value(values["metricOf"]):
            values["metricOf"] = MetricOfFunnels.SESSION_COUNT
            values["viewType"] = MetricOtherViewType.OTHER_CHART
            # values["viewType"] = MetricOtherViewType.OTHER_CHART
        if values.get("series") is not None and len(values["series"]) > 0:
            values["series"] = [values["series"][0]]
        return values

@@ -1118,40 +1078,6 @@ class CardFunnel(__CardSchema):
        return self


class CardErrors(__CardSchema):
    metric_type: Literal[MetricType.ERRORS]
    metric_of: MetricOfErrors = Field(default=MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS)
    view_type: MetricOtherViewType = Field(...)

    @model_validator(mode="before")
    @classmethod
    def __enforce_default(cls, values):
        values["series"] = []
        return values

    @model_validator(mode="after")
    def __transform(self):
        self.metric_of = MetricOfErrors(self.metric_of)
        return self


class CardWebVital(__CardSchema):
    metric_type: Literal[MetricType.WEB_VITAL]
    metric_of: MetricOfWebVitals = Field(default=MetricOfWebVitals.AVG_VISITED_PAGES)
    view_type: MetricOtherViewType = Field(...)

    @model_validator(mode="before")
    @classmethod
    def __enforce_default(cls, values):
        values["series"] = []
        return values

    @model_validator(mode="after")
    def __transform(self):
        self.metric_of = MetricOfWebVitals(self.metric_of)
        return self


class CardHeatMap(__CardSchema):
    metric_type: Literal[MetricType.HEAT_MAP]
    metric_of: MetricOfHeatMap = Field(default=MetricOfHeatMap.HEAT_MAP_URL)

@@ -1189,7 +1115,7 @@ class CardPathAnalysis(__CardSchema):
    view_type: MetricOtherViewType = Field(...)
    metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list)
    density: int = Field(default=4, ge=2, le=10)
    rows: int = Field(default=3, ge=1, le=10)
    rows: int = Field(default=5, ge=1, le=10)

    start_type: Literal["start", "end"] = Field(default="start")
    start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list)

@@ -1241,9 +1167,7 @@ class CardPathAnalysis(__CardSchema):

# Union of cards-schemas that doesn't change between FOSS and EE
__cards_union_base = Union[
    CardTimeSeries, CardTable, CardFunnel,
    CardErrors, CardWebVital, CardHeatMap,
    CardPathAnalysis]
    CardTimeSeries, CardTable, CardFunnel, CardHeatMap, CardPathAnalysis]
CardSchema = ORUnion(__cards_union_base, discriminator='metric_type')
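ORUnion is the project's own helper (defined outside this hunk); the union it wraps is discriminated on metric_type, so the serialized metric_type value decides which concrete card model validates a given payload. A self-contained mini sketch of that dispatch pattern follows, with illustrative model names rather than the real card classes.

# --- illustrative example, not part of the diff ---
from typing import Annotated, Literal, Union
from pydantic import BaseModel, Field, TypeAdapter

class _MiniTimeseries(BaseModel):
    metric_type: Literal["timeseries"]

class _MiniFunnel(BaseModel):
    metric_type: Literal["funnel"]

# A union discriminated on metric_type: the field's value picks the model.
_MiniCard = Annotated[Union[_MiniTimeseries, _MiniFunnel], Field(discriminator="metric_type")]

card = TypeAdapter(_MiniCard).validate_python({"metric_type": "funnel"})
assert isinstance(card, _MiniFunnel)
# --- end example ---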

@@ -1389,7 +1313,7 @@ class SearchNoteSchema(_PaginatedSchema):


class SessionNoteSchema(BaseModel):
    message: str = Field(..., min_length=2)
    message: Optional[str] = Field(None, max_length=250)
    tag: Optional[str] = Field(default=None)
    timestamp: int = Field(default=-1)
    is_public: bool = Field(default=False)

@@ -1424,6 +1348,42 @@ class SearchCardsSchema(_PaginatedSchema):
    query: Optional[str] = Field(default=None)


class MetricSortColumnType(str, Enum):
    NAME = "name"
    METRIC_TYPE = "metric_type"
    METRIC_OF = "metric_of"
    IS_PUBLIC = "is_public"
    CREATED_AT = "created_at"
    EDITED_AT = "edited_at"


class MetricFilterColumnType(str, Enum):
    NAME = "name"
    METRIC_TYPE = "metric_type"
    METRIC_OF = "metric_of"
    IS_PUBLIC = "is_public"
    USER_ID = "user_id"
    CREATED_AT = "created_at"
    EDITED_AT = "edited_at"


class MetricListSort(BaseModel):
    field: Optional[str] = Field(default=None)
    order: Optional[str] = Field(default=SortOrderType.DESC)


class MetricFilter(BaseModel):
    type: Optional[str] = Field(default=None)
    query: Optional[str] = Field(default=None)


class MetricSearchSchema(_PaginatedSchema):
    filter: Optional[MetricFilter] = Field(default=None)
    sort: Optional[MetricListSort] = Field(default=MetricListSort())
    shared_only: bool = Field(default=False)
    mine_only: bool = Field(default=False)
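The new search schemas split the request into what to filter on (MetricFilter) and how to order the results (MetricListSort), with pagination inherited from _PaginatedSchema. A hypothetical payload these models would accept, assuming the inherited pagination fields have defaults; the field values are illustrative only.

# --- illustrative example, not part of the diff ---
search = MetricSearchSchema(
    filter=MetricFilter(type="timeseries", query="errors"),   # hypothetical filter
    sort=MetricListSort(field="created_at", order="desc"),
    shared_only=False,
    mine_only=True,
)
# --- end example ---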

class _HeatMapSearchEventRaw(SessionSearchEventSchema2):
    type: Literal[EventType.LOCATION] = Field(...)


@@ -38,6 +38,9 @@ def force_is_event(events_enum: list[Type[Enum]]):
    def fn(value: list):
        if value is not None and isinstance(value, list):
            for v in value:
                if v.get("type") is None:
                    v["isEvent"] = False
                    continue
                r = False
                for en in events_enum:
                    if en.has_value(v["type"]) or en.has_value(v["type"].lower()):
assist/package-lock.json (generated, 20 changes)

@@ -1,6 +1,6 @@
{
  "name": "assist-server",
  "version": "v1.12.0",
  "version": "v1.22.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {

@@ -433,9 +433,9 @@
      }
    },
    "node_modules/express": {
      "version": "4.21.1",
      "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz",
      "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==",
      "version": "4.21.2",
      "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
      "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
      "dependencies": {
        "accepts": "~1.3.8",
        "array-flatten": "1.1.1",

@@ -456,7 +456,7 @@
        "methods": "~1.1.2",
        "on-finished": "2.4.1",
        "parseurl": "~1.3.3",
        "path-to-regexp": "0.1.10",
        "path-to-regexp": "0.1.12",
        "proxy-addr": "~2.0.7",
        "qs": "6.13.0",
        "range-parser": "~1.2.1",

@@ -471,6 +471,10 @@
      },
      "engines": {
        "node": ">= 0.10.0"
      },
      "funding": {
        "type": "opencollective",
        "url": "https://opencollective.com/express"
      }
    },
    "node_modules/extsprintf": {

@@ -930,9 +934,9 @@
      }
    },
    "node_modules/path-to-regexp": {
      "version": "0.1.10",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz",
      "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w=="
      "version": "0.1.12",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
      "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ=="
    },
    "node_modules/prom-client": {
      "version": "15.1.3",
Some files were not shown because too many files have changed in this diff.