* feat(backend): implemented unzipping for http requests with gzip content-type
* fix(tracker): rm unused import
* change(tracker): configure automatic headers, compress anything bigger than 24k, add third-party lib to list
* feat(backend): using custom library for unzipping request body
* feat(backend): added extra logs
* feat(backend): more debug logs
* feat(backend): added compression threshold to start request
* change(tracker): support compressionThreshold in tracker
* feat(backend): debug log for body content
* feat(backend): removed debug logs in http methods
* change(tracker): fix priority sending, remove dead code
* feat(backend): removed debug logs in http methods
* Enable session encryption (#1121)
* feat(backend): enable session encryption
* feat(backend): fixed updated method name in failover algo
* feat(backend): disable encryption by default
* change(tracker): fix iframe network handling
* change(ui): add toast for recording error
* Encryption metrics (#1151)
* feat(backend): added metric to measure the duration of session encryption
* feat(backend): enabled encryption
* feat(backend): fixed typo issue in packSession method
* change(ui): change error toast for rec
* change(ui): add tooltip for added live sessions
* chore(helm): disabling redis string if not enabled (#1153) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* change(player): fix typos; priority for 1st dom file
* fix(player): priority and await for message processing
* change(ui) - player improvements (#1164)
* change(ui) - player - back button spacing
* change(ui) - onboarding - changes
* change(ui) - onboarding - changes
* change(ui) - integrations gap-4
* change(ui) - install script copy button styles
* change(ui) - copy button in account settings
* fix(ui) - error details modal loader position
* change(ui) - share popup styles
* change(ui) - player improvements
* change(ui) - player improvements - playback speed with menu
* change(ui) - player improvements - current timezone
* change(ui) - player improvements - autoplay options
* fix(ui) - user sessions modal - navigation
* feat(player): lazy JS DOM node creation (needs fixes to reach full potential)
* fix(player): drastically reduce the number of node getter calls during virtual node insertion
* feat(player/VirtualDOM): OnloadVRoot & OnloadStyleSheet for lazy iframe innerContent initialisation & elimination of the forceInsertion requirement in this case; a few renamings
* style(player): a few renamings; improved comments
* feat(player/DOMManager): VirtualNodes insertion prioritization (for styles)
* fix(player): cursor svg with light border for better visibility on dark backgrounds
* change(ui) - session bookmarks: remove from the list and copy options
* chore(helm): Updating frontend image release (#1166)
* chore(helm): Updating frontend image release
* fix(helm): PG custom port Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(player): consider stringDict before any CreateDocument (fastfix)
* style(player/DOMManager/safeCSSRules): depend on interfaces
* fixup! fix(player): consider stringDict before any CreateDocument (fastfix)
* fix(player): proper unmount
* fix(helm): Variable override, priority to the user-created one (#1173)
* fix(ui) - search url to wait for metadata to load
* fix(tracker): optimise node counting
* fix(tracker): changelog
* fix(ui) - sessions reload (#1177)
* fix(tracker): fix iframe network requests tracking
* fix(ui) - check for error status and force logout (#1179)
* fix(ui) - token expire
* fix(ui) - token expire
* change(player): manual decompression for encrypted files
* change(player): detect gzip file after decoding
* change(ui) - show projects in menu for all
* [Storage] different order to compress and encrypt (#1182)
* feat(backend): try to compress and encrypt in a new way
* chore(helm): Update cors headers for http Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(ui): fix assist tooltip
* change(ui): add sleep icon for inactive assist users
* fix(ui): fix player automatic jump and start issues
* Update .env.sample
* Update cli to fetch latest patches and kubeconfig file hierarchy (#1183)
* chore(helm): Kubeconfig file hierarchy Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): openreplay -u fetches update from current version, unless flag set Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): Updating comment (#1184) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): Adding option to keep backup directories (#1185) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): removing log message (#1186) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): Updating comment (#1188) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(helm): Annotation inject order Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(player): fix vroot context getter
* feat(ui): display real session time
* change(ui) - clearsearch styling on disable
* change(ui) - session url changes
* refactor(player/DOMManager): notMountedChildren rename
* change(ui) - check if saved search present
* change(ui) - player control icons and other changes
* change(ui) - password validations
* change(ui) - password validations
* chore(helm): Override image pull policy (#1199)
* change(ui) - player user steps improvements (#1201)
* change(ui) - user steps
* change(ui) - user steps
* change(ui) - user steps
* change(ui) - user steps - icon and other styles
* fix(ui) - xray vertical line sync on resize
* change(ui) - projects: remove the status check
* fix(cli): Proper git tag propagation (#1202) and logging of clone Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* Adding maintenance page
* Improved session compression (#1200)
* feat(backend): implemented new compression
* chore(crons): Updating dockerfile Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* change(ui) - insights improvements
* fix(ui) - url search params: remove [] for keys
* fix(player): fix dict reset
* Remove message index from mob file (#1213)
* feat(backend): removed message index from mob file messages
* feat(backend): remove duplicated messages (by message index)
* feat(backend): added MAX_INDEX at the beginning of session to indicate a new version of mob file
* feat(backend): added comments to code
* change(ui): remove indexes from msgs
* change(player): remove 8 byte skip for index
* change(player): remove indexes
* change(player): bugfix
* change(tracker): update tests
* change(tracker): remove batch writer changes
* change(player): fix comments
* feat(backend): updated go.mod file
* change(player): change time str
* feat(player): added mice trail
* change(player): change trail color
* change(player): change styles for buttons
* chore(build): Don't commit chart change for ee patch (#1216) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* change(ui): updated recaptcha lib, which was causing an issue with state reading
* change(ui) - no-content icon updates for metadata and webhooks
* change(player): make cursor icon bigger
* fix(player): fix virtualization
* fix(player): fix virtualization
* fix(ui) - onboarding project edit
* change(ui) - no-content graphic for projects, and svg component changes
* change(ui) - events filter placeholder
* change(ui) - ui feedback on user steps
* change(ui): add more details to health status
* [Storage] timestamp sorting and filtering (#1218)
* feat(backend): combined sorting by index and timestamp
* feat(backend): write only the last timestamp message in a row
* change(ui) - textarea styles
* change(ui) - button text color
* change(ui): add more details to health status
* fix(ui): fix screen rec error handling
* fix(ui): fix screen rec stopping
* fix(tracker): fix q sender token mismatch during assist connection
* change(ui) - assist recordings pagination api
* change(ui) - assist recordings pagination api
* fix(ui) - not popup conflict with timeline tooltip
* Updating version
* change(tracker): 7.0.0: set max amount of restarts for compression error
* fix(ui) - active menu link
* fix redis endpoint and chalice health endpoints (#1138)
* chore(helm): Adding redis string from global config Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(chalice): health check url for alerts and assist Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(ee): chalice health check (#1142) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): Adding verbose logging (#1144) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(helm): Adding option for records bucket (#1146) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(build): Bump image version of frontend assets while building (#1149) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* feat(chalice): fixed jobs execution
* feat(chalice): configurable mobs expiration
* feat(chalice): changes
* feat(chalice): refactored Jobs
* feat(chalice): added limits on Jobs
* chore(build): test patch branch
* chore(build): testing EE cron-Jobs
* Add files via upload (#1156)
* Add files via upload (#1157)
* chore(helm): Enabling redis string for helm template variable (#1159), fix #1158 Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* Changing default encryption to false (#1161)
* Updated hero
* feat(chalice): return all records if date is not specified
* feat(chalice): refactored records list
* Moving cli to scripts folder (#1196)
* Revert "Moving cli to scripts folder (#1196)" (#1197); this reverts commit c947e48d99
* feat(chalice): support old FilterType
* fix(ui) - alert form crash
* fix(ui) - alert form crash
* fix(ui) - assist menu status
* Redshift connector (#1170)
* Updated dependencies for redshift connector, replaced the os module with python-decouple
* Updated service and images
* Updated message protocol, added exception for BatchMetadata when version is 0 (we apply the old read method)
* Fixed load error from s3 to redshift; null values for string columns are now empty strings ("")
* Added test file consumer_async.py: reads kafka raw every 3 minutes and sends background tasks to upload to cloud
* Added method to skip messages that are not inserted to cloud
* Added logs into consumer_async; changed urls and issues in sessions table from list to string
* Split between messages for sessions table and for events table
* Updated redshift tables
* Fixed small issue in query redshift_sessions.sql
* Updated Dockerfiles; cleaned logs of consumer_async; updated/fixed tables; transformed NaN to NULL for VARCHAR columns
* Added error handler for sql dropped connection
* chore(docker): Optimize docker builds Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* Variables renamed
* Adding compression libraries
* Set default value of count events to 0 (instead of NULL) when event did not occur
* Added support for tracking specific projects; added PG handler to connect to sessions table
* Added method to update values in db connection for sessions ended and restarted
* Removing intelligent file copying
* chore(connector): Build file Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* Adding connection pool for pg
* Renaming and optimizing
* Fixed issue of missing session information Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com> Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(build): Parallel build Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* feat(chalice): changed release version
* feat(assist): changed release version
* feat(peers): changed release version
* feat(sourcemaps-reader): changed release version
* feat(chalice): enhanced health-check
* feat(DB): sessions_count table to keep status
* feat(chalice): changed release version
* feat(chalice): refactored projects code
* feat(chalice): refactored projects code
* feat(chalice): sessions-check-flag every hour
* feat(chalice): sessions-check-delta set to 4 hours
* feat(chalice): use experimental session search for metrics
* feat(chalice): projects stats for health-check
* feat(DB): projects stats for health-check
* feat(crons): projects stats for health-check
* feat(chalice): changed projects stats for health-check
* feat(crons): changed projects stats for health-check
* chore(helm): projectStats cron every 18 min
* chore(helm): projectStats-fix cron every Sunday at 5am
* feat(crons): reorganized crons
* feat(chalice): fixed typo
* feat(chalice): changed health-check response
* feat(crons): changed health-check response
* (feat): Chalice - Allow SAML users to log in with non-password methods as well as the usual password method, for example Windows Integrated Authentication
* Move security field to correct area under SAML2 settings
* feat(chalice): format code
* feat(chalice): changed recordings response
* feat(crons): fixed health check cron
* feat(crons): refactored main
* feat(chalice): changed recordings response
* feat(chalice): updated dependencies
* feat(crons): updated dependencies
* feat(alerts): updated dependencies
* feat(chalice): fixed recordings response recursion error
* feat(assist): updated dependencies
* feat(sourcemaps-reader): upgraded dependencies
* change(ui) - user event text change
* fix(ui): fix events merging
* fix(connector): handle db connection drop (#1223)
* Added compatibility with SaaS; added reboot of connection if connection dropped
* Small fix
* fix(backend): disabled debug log in http handler
* fix(player): fix autopause on tabs
* Updated python template to read messages with BatchMeta with old version (#1225)
* change(ui) - user events text change
* change(ui) - webhooks no-content icon size
* chore(backend): upgraded go to 1.19 and ClickHouse to 2.9.1
* fix(player): fix frustrations ingestion
* fix(tracker): fix email detection performance
* fix(tracker): fix email masking length
* fix(player): fix fullview prop passing to children (live pl)
* feat(chalice): reduce issues for replay (#1227)
* change(ui) - bugreport modal title color
* fix(ui) - elastic config validation rules
* change(ui) - issue form and share popup titles
* change(ui) - placeholder text change
* change(ui) - filter user events text change
* feat(chalice): include enforceSSO in signup status (#1228)
* Updating kyverno
* chore(cli): Override GH repo Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(helm): Update kafka chart: enable metrics and increase storage
* change(ui) - enforce sso
* Api v1.12.0 (#1230)
* feat(chalice): include enforceSSO in signup status
* feat(chalice): changed 1-time health-check
* fix(helm): typo Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* change(ui) - support icon border
* chore(helm): enable kafka jmx metrics Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* change(ui) - fetch details modal - no-content text size
* change(ui) - playback timemode alignment
* fix(connector): fixed bug of cache dict size error (#1226)
* change(ui) - text change on create issue and share popups
* change(ui) - share popup styles
* change(ui) - user events visit event padding
* feat(crons): include fastapi (#1231)
* New env variable CLOUD (aws by default) (#1232)
* feat(backend): added new env variable CLOUD (aws by default)
* chore(backend): Adding env variable for CLOUD Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com> Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
* Compression worker (#1233)
* feat(backend): added extra worker for session compression
* feat(backend): debug logs
* feat(backend): added compression ratio metric
* feat(backend): reduced number of duplicate logs
* feat(backend): rewrote worker management
* chore(minio): changed lifecycle rules to support delete-jobs (#1235)
* fix(backend): correct compression ratio value
* fix(backend): reduced ender tick duration
* feat(backend): insert referrer into sessions table (#1237)
* chore(cli): Adding separate query for ee cleanup Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(connector): Added checkpoints and sigterm handler (#1234)
* fix(connector): fixed bug of cache dict size error
* fix(connector): Added method to save state in s3 for redshift if sigterm arises
* fix(connector): Added exit signal handler and checkpoint method
* Added sslmode selection for connection to database; added use_ssl parameter for S3 connection
* fix(cli): Override cli options (#1239) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(player): fix first 8 byte checker
* fix(player): remove logs
* Update .env.sample
* fix(ui) - search init - wait for filters (#1241)
* fix(player): fix first 8 byte checker
* chore(cron): Adding missing deps Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(player): fix commit conflict
* fix(backend): added Content-Encoding to CORS for http service
* fix(backend): added COMPRESSION_THRESHOLD env variable to Dockerfile
* fix(player): ensure that player is cleaned on unmount
* chore(helm): Updating frontend image release (#1243)
* Update README.md
* feat(chalice): fixed trace payload parsing
* feat(player): player file loader refactoring (#1203)
* change(ui): refactor mob loading
* refactor(player): split message loader into separate file, remove the toast dependency from the player lib, fix types, fix inspector and screen context
* refactor(player): simplify file loading, add safe error throws
* refactor(player): move loading status changers to the end of the flow
* change(ui) - assist call to use iceTransportPolicy all
* change(ui) - removed errors route
* chore(helm): enabling pg_stat for metrics Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(tracker): fix time inputs capturing
* change(ui) - antd dependency
* fix(player): clear selection manager on clicks; display frustrations row on xray by default
* fix(player): add option to disable network in iframes
* refactor(cli): In old clusters kyverno upgrade won't work, so we'll have to upgrade OR only. Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* feat(tracker): new axios capturing; tracker 7.0.1
* feat(chalice) - feature flags (#1252)
* feat(api) - feature flags - schema
* feat(api) - feature flags - wip
* feat(api) - feature flags
* feat(api) - feature flags - set back root path
* feat(api) - feature flags
* feat(api) - feature flags
* feat(api) - feature flags - review
* feat(DB): feature flags DB structure
* feat(chalice): feature flags permissions support
* feat(chalice): feature flags changed code
* feat(chalice): feature flags add permissions to DB Co-authored-by: Taha Yassine Kraiem <tahayk2@gmail.com>
* [sourcemaps-reader] Azure blob storage support (#1259)
* feat(sourcemaps-reader): implemented azure blob storage support for sourcemaps reader
* feat(sourcemaps-reader): azure blob storage support - cleaned code Co-authored-by: Taha Yassine Kraiem <tahayk2@gmail.com>
* fix(player): fix selection manager styles and reset
* fix(cli): KUBECONFIG PATH override (#1266) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* chore(cli): Adding info on which kubeconfig is getting used (#1261) Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* feat(ui) - enforce pwd during signup (#1271)
* fix(helm): SITE_URL injection
* fix(player): hide empty index prop
* change(repo): ignore precommit config
* change(repo): precommit config
* feat(chalice): faster projects response
* fix(chalice): ignore SSO for testing
* feat(chalice): added PyLint for dev purposes
* feat(DB): support tab_id for all events
* feat(chalice): removed PyLint
* fix(chalice): include metadata in sessions exp search (#1291) (cherry picked from commit 07dd9da820)
* refactor(chalice): upgraded dependencies
* refactor(alerts): upgraded dependencies
* refactor(crons): upgraded dependencies
* feat(DB): added tab_id in creation queries
* feat(DB): added user_city
* feat(DB): added user_state
* feat(DB): added user_city
* feat(DB): added user_state
* feat(DB): create index for user_city
* feat(DB): create index for user_state
* feat(chalice): search sessions by user_city
* feat(chalice): search sessions by user_state
* fix(chalice): install SSO dependencies

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: Alexander Zavorotynskiy <zavorotynskiy@pm.me>
Co-authored-by: nick-delirium <nikita@openreplay.com>
Co-authored-by: Rajesh Rajendran <rjshrjndrn@users.noreply.github.com>
Co-authored-by: Shekar Siri <sshekarsiri@gmail.com>
Co-authored-by: Alex Kaminskii <alex@openreplay.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com>
Co-authored-by: MauricioGarciaS <47052044+MauricioGarciaS@users.noreply.github.com>
Co-authored-by: Dayan Graham <d.graham50@hotmail.co.uk>
from typing import List

import schemas
from chalicelib.core import events, metadata, projects, performance_event, sessions_favorite
from chalicelib.utils import pg_client, helper, metrics_helper
from chalicelib.utils import sql_helper as sh
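
# SESSION_PROJECTION_COLS is interpolated into the session-search SELECTs below; any query
# embedding it must bind %(userId)s (used by the "viewed" sub-select) and expose a
# favorite_sessions relation in its FROM clause (used by the "favorite" column).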
SESSION_PROJECTION_COLS = """s.project_id,
       s.session_id::text AS session_id,
       s.user_uuid,
       s.user_id,
       s.user_os,
       s.user_browser,
       s.user_device,
       s.user_device_type,
       s.user_country,
       s.start_ts,
       s.duration,
       s.events_count,
       s.pages_count,
       s.errors_count,
       s.user_anonymous_id,
       s.platform,
       s.issue_score,
       to_jsonb(s.issue_types) AS issue_types,
       favorite_sessions.session_id NOTNULL AS favorite,
       COALESCE((SELECT TRUE
                 FROM public.user_viewed_sessions AS fs
                 WHERE s.session_id = fs.session_id
                   AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """


# This function executes the search query and returns the result
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
                    error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):
    if data.bookmarked:
        data.startDate, data.endDate = sessions_favorite.get_start_end_timestamp(project_id, user_id)

    full_args, query_part = search_query_parts(data=data, error_status=error_status, errors_only=errors_only,
                                               favorite_only=data.bookmarked, issue=issue, project_id=project_id,
                                               user_id=user_id)
    if data.limit is not None and data.page is not None:
        full_args["sessions_limit"] = data.limit
        full_args["sessions_limit_s"] = (data.page - 1) * data.limit
        full_args["sessions_limit_e"] = data.page * data.limit
    else:
        full_args["sessions_limit"] = 200
        full_args["sessions_limit_s"] = 1
        full_args["sessions_limit_e"] = 200

    meta_keys = []
    with pg_client.PostgresClient() as cur:
        if errors_only:
            main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id,
                                                COALESCE((SELECT TRUE
                                                          FROM public.user_viewed_errors AS ve
                                                          WHERE er.error_id = ve.error_id
                                                            AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
                                         {query_part};""", full_args)

        elif count_only:
            main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
                                                COUNT(DISTINCT s.user_uuid) AS count_users
                                         {query_part};""", full_args)
        elif data.group_by_user:
            g_sort = "count(full_sessions)"
            if data.order is None:
                data.order = schemas.SortOrderType.desc.value
            else:
                data.order = data.order.value
            if data.sort is not None and data.sort != 'sessionsCount':
                sort = helper.key_to_snake_case(data.sort)
                g_sort = f"{'MIN' if data.order == schemas.SortOrderType.desc else 'MAX'}({sort})"
            else:
                sort = 'start_ts'

            meta_keys = metadata.get(project_id=project_id)
            main_query = cur.mogrify(f"""SELECT COUNT(*) AS count,
                                                COALESCE(JSONB_AGG(users_sessions)
                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
                                         FROM (SELECT user_id,
                                                      count(full_sessions) AS user_sessions_count,
                                                      jsonb_agg(full_sessions) FILTER (WHERE rn <= 1) AS last_session,
                                                      MIN(full_sessions.start_ts) AS first_session_ts,
                                                      ROW_NUMBER() OVER (ORDER BY {g_sort} {data.order}) AS rn
                                               FROM (SELECT *, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY {sort} {data.order}) AS rn
                                                     FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
                                                           {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
                                                           {query_part}
                                                           ) AS filtred_sessions
                                                     ) AS full_sessions
                                               GROUP BY user_id
                                               ) AS users_sessions;""",
                                     full_args)
        elif ids_only:
            main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
                                         {query_part}
                                         ORDER BY s.session_id desc
                                         LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
                                     full_args)
        else:
            if data.order is None:
                data.order = schemas.SortOrderType.desc.value
            else:
                data.order = data.order.value
            sort = 'session_id'
            if data.sort is not None and data.sort != "session_id":
                # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
                sort = helper.key_to_snake_case(data.sort)
            meta_keys = metadata.get(project_id=project_id)
            main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
                                                COALESCE(JSONB_AGG(full_sessions)
                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
                                         FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY {sort} {data.order}, issue_score DESC) AS rn
                                               FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
                                                     {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
                                                     {query_part}
                                                     ORDER BY s.session_id desc) AS filtred_sessions
                                               ORDER BY {sort} {data.order}, issue_score DESC) AS full_sessions;""",
                                     full_args)
        # print("--------------------")
        # print(main_query)
        # print("--------------------")
        try:
            cur.execute(main_query)
        except Exception as err:
            print("--------- SESSIONS SEARCH QUERY EXCEPTION -----------")
            print(main_query.decode('UTF-8'))
            print("--------- PAYLOAD -----------")
            print(data.json())
            print("--------------------")
            raise err
        if errors_only or ids_only:
            return helper.list_to_camel_case(cur.fetchall())

        sessions = cur.fetchone()
        if count_only:
            return helper.dict_to_camel_case(sessions)

        total = sessions["count"]
        sessions = sessions["sessions"]

        if data.group_by_user:
            for i, s in enumerate(sessions):
                sessions[i] = {**s.pop("last_session")[0], **s}
                sessions[i].pop("rn")
                sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
                                           if sessions[i][f'metadata_{k["index"]}'] is not None}
        else:
            for i, s in enumerate(sessions):
                sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
                                           if sessions[i][f'metadata_{k["index"]}'] is not None}
    # if not data.group_by_user and data.sort is not None and data.sort != "session_id":
    #     sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
    #                       reverse=data.order.upper() == "DESC")
    return {
        'total': total,
        'sessions': helper.list_to_camel_case(sessions)
    }

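
# Pagination note: with limit=50 and page=2, search_sessions sets sessions_limit_s=50 and
# sessions_limit_e=100, so the JSONB_AGG ... FILTER above keeps only rows whose window row
# number rn satisfies 50 < rn <= 100 (the second page of 50 sessions), while the outer
# COUNT still reports the full number of matching sessions.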
def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
                   view_type: schemas.MetricTimeseriesViewType, metric_type: schemas.MetricType,
                   metric_of: schemas.MetricOfTable, metric_value: List):
    step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endDate, startTimestamp=data.startDate,
                                                   density=density, factor=1, decimal=True))
    extra_event = None
    if metric_of == schemas.MetricOfTable.visited_url:
        extra_event = "events.pages"
    elif metric_of == schemas.MetricOfTable.issues and len(metric_value) > 0:
        data.filters.append(schemas.SessionSearchFilterSchema(value=metric_value, type=schemas.FilterType.issue,
                                                              operator=schemas.SearchEventOperator._is))
    full_args, query_part = search_query_parts(data=data, error_status=None, errors_only=False,
                                               favorite_only=False, issue=None, project_id=project_id,
                                               user_id=None, extra_event=extra_event)
    full_args["step_size"] = step_size
    sessions = []
    with pg_client.PostgresClient() as cur:
        if metric_type == schemas.MetricType.timeseries:
            if view_type == schemas.MetricTimeseriesViewType.line_chart:
                main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts
                                                                    {query_part})
                                             SELECT generated_timestamp AS timestamp,
                                                    COUNT(s) AS count
                                             FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
                                                  LEFT JOIN LATERAL ( SELECT 1 AS s
                                                                      FROM full_sessions
                                                                      WHERE start_ts >= generated_timestamp
                                                                        AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
                                             GROUP BY generated_timestamp
                                             ORDER BY generated_timestamp;""", full_args)
            else:
                main_query = cur.mogrify(f"""SELECT count(DISTINCT s.session_id) AS count
                                             {query_part};""", full_args)

            # print("--------------------")
            # print(main_query)
            # print("--------------------")
            try:
                cur.execute(main_query)
            except Exception as err:
                print("--------- SESSIONS-SERIES QUERY EXCEPTION -----------")
                print(main_query.decode('UTF-8'))
                print("--------- PAYLOAD -----------")
                print(data.json())
                print("--------------------")
                raise err
            if view_type == schemas.MetricTimeseriesViewType.line_chart:
                sessions = cur.fetchall()
            else:
                sessions = cur.fetchone()["count"]
        elif metric_type == schemas.MetricType.table:
            if isinstance(metric_of, schemas.MetricOfTable):
                main_col = "user_id"
                extra_col = ""
                extra_where = ""
                pre_query = ""
                distinct_on = "s.session_id"
                if metric_of == schemas.MetricOfTable.user_country:
                    main_col = "user_country"
                elif metric_of == schemas.MetricOfTable.user_device:
                    main_col = "user_device"
                elif metric_of == schemas.MetricOfTable.user_browser:
                    main_col = "user_browser"
                elif metric_of == schemas.MetricOfTable.issues:
                    main_col = "issue"
                    extra_col = f", UNNEST(s.issue_types) AS {main_col}"
                    if len(metric_value) > 0:
                        extra_where = []
                        for i in range(len(metric_value)):
                            arg_name = f"selected_issue_{i}"
                            extra_where.append(f"{main_col} = %({arg_name})s")
                            full_args[arg_name] = metric_value[i]
                        extra_where = f"WHERE ({' OR '.join(extra_where)})"
                elif metric_of == schemas.MetricOfTable.visited_url:
                    main_col = "path"
                    extra_col = ", path"
                    distinct_on += ",path"
            main_query = cur.mogrify(f"""{pre_query}
                                         SELECT COUNT(*) AS count, COALESCE(JSONB_AGG(users_sessions) FILTER ( WHERE rn <= 200 ), '[]'::JSONB) AS values
                                         FROM (SELECT {main_col} AS name,
                                                      count(DISTINCT session_id) AS session_count,
                                                      ROW_NUMBER() OVER (ORDER BY count(full_sessions) DESC) AS rn
                                               FROM (SELECT *
                                                     FROM (SELECT DISTINCT ON({distinct_on}) s.session_id, s.user_uuid,
                                                                  s.user_id, s.user_os,
                                                                  s.user_browser, s.user_device,
                                                                  s.user_device_type, s.user_country, s.issue_types{extra_col}
                                                           {query_part}
                                                           ORDER BY s.session_id desc) AS filtred_sessions
                                                     ) AS full_sessions
                                               {extra_where}
                                               GROUP BY {main_col}
                                               ORDER BY session_count DESC) AS users_sessions;""",
                                     full_args)
            # print("--------------------")
            # print(main_query)
            # print("--------------------")
            cur.execute(main_query)
            sessions = cur.fetchone()
            for s in sessions["values"]:
                s.pop("rn")
            sessions["values"] = helper.list_to_camel_case(sessions["values"])

    return sessions

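
# Illustrative note: assuming metrics_helper.__get_step_size divides the search window
# evenly by density (e.g. a 10-minute range with density=10 would give step_size=60000 ms),
# the line-chart query above emits one generated_timestamp per bucket and counts the
# sessions whose start_ts falls within [generated_timestamp, generated_timestamp + step_size].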
def __is_valid_event(is_any: bool, event: schemas._SessionSearchEventSchema):
    return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.request_details,
                                                                            schemas.EventType.graphql] \
                or event.type in [schemas.PerformanceEventType.location_dom_complete,
                                  schemas.PerformanceEventType.location_largest_contentful_paint_time,
                                  schemas.PerformanceEventType.location_ttfb,
                                  schemas.PerformanceEventType.location_avg_cpu_load,
                                  schemas.PerformanceEventType.location_avg_memory_usage
                                  ] and (event.source is None or len(event.source) == 0) \
                or event.type in [schemas.EventType.request_details, schemas.EventType.graphql] and (
                        event.filters is None or len(event.filters) == 0))

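# In plain terms, an event is rejected when: (a) it has no values, is not an "any" match,
# and is not a request_details/graphql event; (b) it is a performance event with no source
# threshold; or (c) it is a request_details/graphql event with no filters.
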
# this function generates the query and returns the generated query with the dict of query arguments
def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status, errors_only, favorite_only, issue,
                       project_id, user_id, extra_event=None):
    ss_constraints = []
    full_args = {"project_id": project_id, "startDate": data.startDate, "endDate": data.endDate,
                 "projectId": project_id, "userId": user_id}
    extra_constraints = [
        "s.project_id = %(project_id)s",
        "s.duration IS NOT NULL"
    ]
    extra_from = ""
    events_query_part = ""
    if len(data.filters) > 0:
        meta_keys = None
        for i, f in enumerate(data.filters):
            if not isinstance(f.value, list):
                f.value = [f.value]
            filter_type = f.type
            f.value = helper.values_for_operator(value=f.value, op=f.operator)
            f_k = f"f_value{i}"
            full_args = {**full_args, **sh.multi_values(f.value, value_key=f_k)}
            op = sh.get_sql_operator(f.operator) \
                if filter_type not in [schemas.FilterType.events_count] else f.operator
            is_any = sh.isAny_opreator(f.operator)
            is_undefined = sh.isUndefined_operator(f.operator)
            if not is_any and not is_undefined and len(f.value) == 0:
                continue
            is_not = False
            if sh.is_negation_operator(f.operator):
                is_not = True
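            # Each branch below appends the same predicate twice: once on s.* for the
            # top-level sessions scan (extra_constraints) and once on ms.* for the copy
            # of sessions joined next to each event sub-query (ss_constraints).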
            if filter_type == schemas.FilterType.user_browser:
                if is_any:
                    extra_constraints.append('s.user_browser IS NOT NULL')
                    ss_constraints.append('ms.user_browser IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_browser {op} %({f_k})s', f.value, is_not=is_not,
                                            value_key=f_k))

            elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
                if is_any:
                    extra_constraints.append('s.user_os IS NOT NULL')
                    ss_constraints.append('ms.user_os IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
                if is_any:
                    extra_constraints.append('s.user_device IS NOT NULL')
                    ss_constraints.append('ms.user_device IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))

            elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
                if is_any:
                    extra_constraints.append('s.user_country IS NOT NULL')
                    ss_constraints.append('ms.user_country IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_country {op} %({f_k})s', f.value, is_not=is_not,
                                            value_key=f_k))

            elif filter_type == schemas.FilterType.user_city:
                if is_any:
                    extra_constraints.append('s.user_city IS NOT NULL')
                    ss_constraints.append('ms.user_city IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_city {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_city {op} %({f_k})s', f.value, is_not=is_not,
                                            value_key=f_k))

            elif filter_type == schemas.FilterType.user_state:
                if is_any:
                    extra_constraints.append('s.user_state IS NOT NULL')
                    ss_constraints.append('ms.user_state IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.user_state {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.user_state {op} %({f_k})s', f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type in [schemas.FilterType.utm_source]:
                if is_any:
                    extra_constraints.append('s.utm_source IS NOT NULL')
                    ss_constraints.append('ms.utm_source IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.utm_source IS NULL')
                    ss_constraints.append('ms.utm_source IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.utm_source {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.utm_source {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type in [schemas.FilterType.utm_medium]:
                if is_any:
                    extra_constraints.append('s.utm_medium IS NOT NULL')
                    ss_constraints.append('ms.utm_medium IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.utm_medium IS NULL')
                    ss_constraints.append('ms.utm_medium IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.utm_medium {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.utm_medium {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type in [schemas.FilterType.utm_campaign]:
                if is_any:
                    extra_constraints.append('s.utm_campaign IS NOT NULL')
                    ss_constraints.append('ms.utm_campaign IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.utm_campaign IS NULL')
                    ss_constraints.append('ms.utm_campaign IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f's.utm_campaign {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f'ms.utm_campaign {op} %({f_k})s::text', f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type == schemas.FilterType.duration:
                if len(f.value) > 0 and f.value[0] is not None:
                    extra_constraints.append("s.duration >= %(minDuration)s")
                    ss_constraints.append("ms.duration >= %(minDuration)s")
                    full_args["minDuration"] = f.value[0]
                if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
                    extra_constraints.append("s.duration <= %(maxDuration)s")
                    ss_constraints.append("ms.duration <= %(maxDuration)s")
                    full_args["maxDuration"] = f.value[1]
            elif filter_type == schemas.FilterType.referrer:
                # extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
                if is_any:
                    extra_constraints.append('s.base_referrer IS NOT NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f"s.base_referrer {op} %({f_k})s", f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type == events.EventType.METADATA.ui_type:
                # get metadata list only if you need it
                if meta_keys is None:
                    meta_keys = metadata.get(project_id=project_id)
                    meta_keys = {m["key"]: m["index"] for m in meta_keys}
                if f.source in meta_keys.keys():
                    if is_any:
                        extra_constraints.append(f"s.{metadata.index_to_colname(meta_keys[f.source])} IS NOT NULL")
                        ss_constraints.append(f"ms.{metadata.index_to_colname(meta_keys[f.source])} IS NOT NULL")
                    elif is_undefined:
                        extra_constraints.append(f"s.{metadata.index_to_colname(meta_keys[f.source])} IS NULL")
                        ss_constraints.append(f"ms.{metadata.index_to_colname(meta_keys[f.source])} IS NULL")
                    else:
                        extra_constraints.append(
                            sh.multi_conditions(
                                f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s::text",
                                f.value, is_not=is_not, value_key=f_k))
                        ss_constraints.append(
                            sh.multi_conditions(
                                f"ms.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s::text",
                                f.value, is_not=is_not, value_key=f_k))
            elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
                if is_any:
                    extra_constraints.append('s.user_id IS NOT NULL')
                    ss_constraints.append('ms.user_id IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.user_id IS NULL')
                    ss_constraints.append('ms.user_id IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f"s.user_id {op} %({f_k})s::text", f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f"ms.user_id {op} %({f_k})s::text", f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type in [schemas.FilterType.user_anonymous_id,
                                 schemas.FilterType.user_anonymous_id_ios]:
                if is_any:
                    extra_constraints.append('s.user_anonymous_id IS NOT NULL')
                    ss_constraints.append('ms.user_anonymous_id IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.user_anonymous_id IS NULL')
                    ss_constraints.append('ms.user_anonymous_id IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f"s.user_anonymous_id {op} %({f_k})s::text", f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f"ms.user_anonymous_id {op} %({f_k})s::text", f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
                if is_any:
                    extra_constraints.append('s.rev_id IS NOT NULL')
                    ss_constraints.append('ms.rev_id IS NOT NULL')
                elif is_undefined:
                    extra_constraints.append('s.rev_id IS NULL')
                    ss_constraints.append('ms.rev_id IS NULL')
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f"s.rev_id {op} %({f_k})s::text", f.value, is_not=is_not, value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f"ms.rev_id {op} %({f_k})s::text", f.value, is_not=is_not,
                                            value_key=f_k))
            elif filter_type == schemas.FilterType.platform:
                # op = sh.get_sql_operator(f.operator)
                extra_constraints.append(
                    sh.multi_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
                                        value_key=f_k))
                ss_constraints.append(
                    sh.multi_conditions(f"ms.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
                                        value_key=f_k))
            elif filter_type == schemas.FilterType.issue:
                if is_any:
                    extra_constraints.append("array_length(s.issue_types, 1) > 0")
                    ss_constraints.append("array_length(ms.issue_types, 1) > 0")
                else:
                    extra_constraints.append(
                        sh.multi_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
                                            value_key=f_k))
                    ss_constraints.append(
                        sh.multi_conditions(f"%({f_k})s {op} ANY (ms.issue_types)", f.value, is_not=is_not,
                                            value_key=f_k))
                # search sessions with click_rage on a specific selector
                if len(f.filters) > 0 and schemas.IssueType.click_rage in f.value:
                    for j, sf in enumerate(f.filters):
                        if sf.operator == schemas.IssueFilterOperator._on_selector:
                            f_k = f"f_value{i}_{j}"
                            full_args = {**full_args, **sh.multi_values(sf.value, value_key=f_k)}
                            extra_constraints += ["mc.timestamp>=%(startDate)s",
                                                  "mc.timestamp<=%(endDate)s",
                                                  "mis.type='click_rage'",
                                                  sh.multi_conditions(f"mc.selector=%({f_k})s",
                                                                      sf.value, is_not=is_not,
                                                                      value_key=f_k)]

                            extra_from += """INNER JOIN events.clicks AS mc USING(session_id)
                                             INNER JOIN events_common.issues USING (session_id,timestamp)
                                             INNER JOIN public.issues AS mis USING (issue_id)\n"""

            elif filter_type == schemas.FilterType.events_count:
                extra_constraints.append(
                    sh.multi_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
                                        value_key=f_k))
                ss_constraints.append(
                    sh.multi_conditions(f"ms.events_count {op} %({f_k})s", f.value, is_not=is_not,
                                        value_key=f_k))
    # ---------------------------------------------------------------------------
    if len(data.events) > 0:
        valid_events_count = 0
        for event in data.events:
            is_any = sh.isAny_opreator(event.operator)
            if not isinstance(event.value, list):
                event.value = [event.value]
            if __is_valid_event(is_any=is_any, event=event):
                valid_events_count += 1
        events_query_from = []
        event_index = 0
        or_events = data.events_order == schemas.SearchEventOrder._or
        # events_joiner = " FULL JOIN " if or_events else " INNER JOIN LATERAL "
        events_joiner = " UNION " if or_events else " INNER JOIN LATERAL "
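        # With events_order == "or", each event produces an independent sub-query and the
        # results are merged with UNION; otherwise the sub-queries are chained with
        # INNER JOIN LATERAL, so every event must match within the same session (and, for
        # "then" ordering, with non-decreasing timestamps — see the event_where below).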
        for i, event in enumerate(data.events):
            event_type = event.type
            is_any = sh.isAny_opreator(event.operator)
            if not isinstance(event.value, list):
                event.value = [event.value]
            if not __is_valid_event(is_any=is_any, event=event):
                continue
            op = sh.get_sql_operator(event.operator)
            is_not = False
            if sh.is_negation_operator(event.operator):
                is_not = True
                op = sh.reverse_sql_operator(op)
            if event_index == 0 or or_events:
                event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)"
                event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s",
                               "main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s",
                               "ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"]
                if favorite_only and not errors_only:
                    event_from += "INNER JOIN public.user_favorite_sessions AS fs USING(session_id)"
                    event_where.append("fs.user_id = %(userId)s")
            else:
                event_from = "%s"
                event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s",
                               "main.session_id=event_0.session_id"]
                if data.events_order == schemas.SearchEventOrder._then:
                    event_where.append(f"event_{event_index - 1}.timestamp <= main.timestamp")
            e_k = f"e_value{i}"
            s_k = e_k + "_source"
            if event.type != schemas.PerformanceEventType.time_between_events:
                event.value = helper.values_for_operator(value=event.value, op=event.operator)
                full_args = {**full_args,
                             **sh.multi_values(event.value, value_key=e_k),
                             **sh.multi_values(event.source, value_key=s_k)}

            if event_type == events.EventType.CLICK.ui_type:
                event_from = event_from % f"{events.EventType.CLICK.table} AS main "
                if not is_any:
                    if event.operator == schemas.ClickEventExtraOperator._on_selector:
                        event_where.append(
                            sh.multi_conditions(f"main.selector = %({e_k})s", event.value, value_key=e_k))
                    else:
                        event_where.append(
                            sh.multi_conditions(f"main.{events.EventType.CLICK.column} {op} %({e_k})s", event.value,
                                                value_key=e_k))

            elif event_type == events.EventType.INPUT.ui_type:
                event_from = event_from % f"{events.EventType.INPUT.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.INPUT.column} {op} %({e_k})s", event.value,
                                            value_key=e_k))
                if event.source is not None and len(event.source) > 0:
                    event_where.append(sh.multi_conditions(f"main.value ILIKE %(custom{i})s", event.source,
                                                           value_key=f"custom{i}"))
                    full_args = {**full_args, **sh.multi_values(event.source, value_key=f"custom{i}")}

            elif event_type == events.EventType.LOCATION.ui_type:
                event_from = event_from % f"{events.EventType.LOCATION.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.LOCATION.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
            elif event_type == events.EventType.CUSTOM.ui_type:
                event_from = event_from % f"{events.EventType.CUSTOM.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.CUSTOM.column} {op} %({e_k})s", event.value,
                                            value_key=e_k))
            elif event_type == events.EventType.REQUEST.ui_type:
                event_from = event_from % f"{events.EventType.REQUEST.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.REQUEST.column} {op} %({e_k})s", event.value,
                                            value_key=e_k))
            # elif event_type == events.event_type.GRAPHQL.ui_type:
            #     event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main "
            #     if not is_any:
            #         event_where.append(
            #             _multiple_conditions(f"main.{events.event_type.GRAPHQL.column} {op} %({e_k})s", event.value,
            #                                  value_key=e_k))
            elif event_type == events.EventType.STATEACTION.ui_type:
                event_from = event_from % f"{events.EventType.STATEACTION.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.STATEACTION.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
            elif event_type == events.EventType.ERROR.ui_type:
                event_from = event_from % f"{events.EventType.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)"
                event.source = list(set(event.source))
                if not is_any and event.value not in [None, "*", ""]:
                    event_where.append(
                        sh.multi_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
                                            event.value, value_key=e_k))
                if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
                    event_where.append(sh.multi_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))

            # ----- IOS
            elif event_type == events.EventType.CLICK_IOS.ui_type:
                event_from = event_from % f"{events.EventType.CLICK_IOS.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.CLICK_IOS.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))

            elif event_type == events.EventType.INPUT_IOS.ui_type:
                event_from = event_from % f"{events.EventType.INPUT_IOS.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.INPUT_IOS.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
                if event.source is not None and len(event.source) > 0:
                    event_where.append(sh.multi_conditions(f"main.value ILIKE %(custom{i})s", event.source,
                                                           value_key=f"custom{i}"))
                    full_args = {**full_args, **sh.multi_values(event.source, f"custom{i}")}
            elif event_type == events.EventType.VIEW_IOS.ui_type:
                event_from = event_from % f"{events.EventType.VIEW_IOS.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.VIEW_IOS.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
            elif event_type == events.EventType.CUSTOM_IOS.ui_type:
                event_from = event_from % f"{events.EventType.CUSTOM_IOS.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.CUSTOM_IOS.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
            elif event_type == events.EventType.REQUEST_IOS.ui_type:
                event_from = event_from % f"{events.EventType.REQUEST_IOS.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.REQUEST_IOS.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
            elif event_type == events.EventType.ERROR_IOS.ui_type:
                event_from = event_from % f"{events.EventType.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)"
                if not is_any and event.value not in [None, "*", ""]:
                    event_where.append(
                        sh.multi_conditions(f"(main1.reason {op} %({e_k})s OR main1.name {op} %({e_k})s)",
                                            event.value, value_key=e_k))
            elif event_type == schemas.PerformanceEventType.fetch_failed:
                event_from = event_from % f"{events.EventType.REQUEST.table} AS main "
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.REQUEST.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
                col = performance_event.get_col(event_type)
                colname = col["column"]
                event_where.append(f"main.{colname} = FALSE")
            # elif event_type == schemas.PerformanceEventType.fetch_duration:
            #     event_from = event_from % f"{events.event_type.REQUEST.table} AS main "
            #     if not is_any:
            #         event_where.append(
            #             _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s",
            #                                  event.value, value_key=e_k))
            #     col = performance_event.get_col(event_type)
            #     colname = col["column"]
            #     tname = "main"
            #     e_k += "_custom"
            #     full_args = {**full_args, **sh.multi_values(event.source, value_key=e_k)}
            #     event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " +
            #                        _multiple_conditions(f"{tname}.{colname} {event.sourceOperator} %({e_k})s",
            #                                             event.source, value_key=e_k))
            elif event_type in [schemas.PerformanceEventType.location_dom_complete,
                                schemas.PerformanceEventType.location_largest_contentful_paint_time,
                                schemas.PerformanceEventType.location_ttfb,
                                schemas.PerformanceEventType.location_avg_cpu_load,
                                schemas.PerformanceEventType.location_avg_memory_usage
                                ]:
                event_from = event_from % f"{events.EventType.LOCATION.table} AS main "
                col = performance_event.get_col(event_type)
                colname = col["column"]
                tname = "main"
                if col.get("extraJoin") is not None:
                    tname = "ej"
                    event_from += f" INNER JOIN {col['extraJoin']} AS {tname} USING(session_id)"
                    event_where += [f"{tname}.timestamp >= main.timestamp", f"{tname}.timestamp >= %(startDate)s",
                                    f"{tname}.timestamp <= %(endDate)s"]
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(f"main.{events.EventType.LOCATION.column} {op} %({e_k})s",
                                            event.value, value_key=e_k))
                e_k += "_custom"
                full_args = {**full_args, **sh.multi_values(event.source, value_key=e_k)}

                event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " +
                                   sh.multi_conditions(f"{tname}.{colname} {event.sourceOperator.value} %({e_k})s",
                                                       event.source, value_key=e_k))
            elif event_type == schemas.PerformanceEventType.time_between_events:
                event_from = event_from % f"{getattr(events.EventType, event.value[0].type).table} AS main INNER JOIN {getattr(events.EventType, event.value[1].type).table} AS main2 USING(session_id) "
                if not isinstance(event.value[0].value, list):
                    event.value[0].value = [event.value[0].value]
                if not isinstance(event.value[1].value, list):
                    event.value[1].value = [event.value[1].value]
                event.value[0].value = helper.values_for_operator(value=event.value[0].value,
                                                                  op=event.value[0].operator)
                event.value[1].value = helper.values_for_operator(value=event.value[1].value,
                                                                  op=event.value[1].operator)
                e_k1 = e_k + "_e1"
                e_k2 = e_k + "_e2"
                full_args = {**full_args,
                             **sh.multi_values(event.value[0].value, value_key=e_k1),
                             **sh.multi_values(event.value[1].value, value_key=e_k2)}
                s_op = sh.get_sql_operator(event.value[0].operator)
                event_where += ["main2.timestamp >= %(startDate)s", "main2.timestamp <= %(endDate)s"]
                if event_index > 0 and not or_events:
                    event_where.append("main2.session_id=event_0.session_id")
                is_any = sh.isAny_opreator(event.value[0].operator)
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(
                            f"main.{getattr(events.EventType, event.value[0].type).column} {s_op} %({e_k1})s",
                            event.value[0].value, value_key=e_k1))
                s_op = sh.get_sql_operator(event.value[1].operator)
                is_any = sh.isAny_opreator(event.value[1].operator)
                if not is_any:
                    event_where.append(
                        sh.multi_conditions(
                            f"main2.{getattr(events.EventType, event.value[1].type).column} {s_op} %({e_k2})s",
                            event.value[1].value, value_key=e_k2))

                e_k += "_custom"
                full_args = {**full_args, **sh.multi_values(event.source, value_key=e_k)}
                event_where.append(
                    sh.multi_conditions(f"main2.timestamp - main.timestamp {event.sourceOperator.value} %({e_k})s",
                                        event.source, value_key=e_k))
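                # Illustrative example: with source=[2000] and sourceOperator ">=",
                # the predicate added above is "main2.timestamp - main.timestamp >= 2000",
                # i.e. at least 2 seconds elapsed between the two matched events.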
elif event_type == schemas.EventType.request_details:
|
|
event_from = event_from % f"{events.EventType.REQUEST.table} AS main "
|
|
apply = False
|
|
for j, f in enumerate(event.filters):
|
|
is_any = sh.isAny_opreator(f.operator)
|
|
if is_any or len(f.value) == 0:
|
|
continue
|
|
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
|
op = sh.get_sql_operator(f.operator)
|
|
e_k_f = e_k + f"_fetch{j}"
|
|
full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
|
|
if f.type == schemas.FetchFilterType._url:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.{events.EventType.REQUEST.column} {op} %({e_k_f})s::text",
|
|
f.value, value_key=e_k_f))
|
|
apply = True
|
|
elif f.type == schemas.FetchFilterType._status_code:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.status_code {f.operator.value} %({e_k_f})s::integer", f.value,
|
|
value_key=e_k_f))
|
|
apply = True
|
|
elif f.type == schemas.FetchFilterType._method:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.method {op} %({e_k_f})s", f.value, value_key=e_k_f))
|
|
apply = True
|
|
elif f.type == schemas.FetchFilterType._duration:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.duration {f.operator.value} %({e_k_f})s::integer", f.value,
|
|
value_key=e_k_f))
|
|
apply = True
|
|
elif f.type == schemas.FetchFilterType._request_body:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.request_body {op} %({e_k_f})s::text", f.value,
|
|
value_key=e_k_f))
|
|
apply = True
|
|
elif f.type == schemas.FetchFilterType._response_body:
|
|
event_where.append(
|
|
sh.multi_conditions(f"main.response_body {op} %({e_k_f})s::text", f.value,
|
|
value_key=e_k_f))
|
|
apply = True
|
|
else:
|
|
print(f"undefined FETCH filter: {f.type}")
|
|
if not apply:
|
|
continue
            elif event_type == schemas.EventType.graphql:
                event_from = event_from % f"{events.EventType.GRAPHQL.table} AS main "
                for j, f in enumerate(event.filters):
                    is_any = sh.isAny_opreator(f.operator)
                    if is_any or len(f.value) == 0:
                        continue
                    f.value = helper.values_for_operator(value=f.value, op=f.operator)
                    op = sh.get_sql_operator(f.operator)
                    e_k_f = e_k + f"_graphql{j}"
                    full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
                    if f.type == schemas.GraphqlFilterType._name:
                        event_where.append(
                            sh.multi_conditions(f"main.{events.EventType.GRAPHQL.column} {op} %({e_k_f})s", f.value,
                                                value_key=e_k_f))
                    elif f.type == schemas.GraphqlFilterType._method:
                        event_where.append(
                            sh.multi_conditions(f"main.method {op} %({e_k_f})s", f.value, value_key=e_k_f))
                    elif f.type == schemas.GraphqlFilterType._request_body:
                        event_where.append(
                            sh.multi_conditions(f"main.request_body {op} %({e_k_f})s", f.value, value_key=e_k_f))
                    elif f.type == schemas.GraphqlFilterType._response_body:
                        event_where.append(
                            sh.multi_conditions(f"main.response_body {op} %({e_k_f})s", f.value, value_key=e_k_f))
                    else:
                        print(f"undefined GRAPHQL filter: {f.type}")
            else:
                continue
            if event_index == 0 or or_events:
                event_where += ss_constraints
            if is_not:
                if event_index == 0 or or_events:
                    events_query_from.append(f"""\
                    (SELECT
                        session_id,
                        0 AS timestamp
                     FROM sessions
                     WHERE EXISTS(SELECT session_id
                                  FROM {event_from}
                                  WHERE {" AND ".join(event_where)}
                                    AND sessions.session_id=ms.session_id) IS FALSE
                       AND project_id = %(projectId)s
                       AND start_ts >= %(startDate)s
                       AND start_ts <= %(endDate)s
                       AND duration IS NOT NULL
                    ) {"" if or_events else (f"AS event_{event_index}" + ("ON(TRUE)" if event_index > 0 else ""))}\
                    """)
                else:
                    events_query_from.append(f"""\
                    (SELECT
                        event_0.session_id,
                        event_{event_index - 1}.timestamp AS timestamp
                     WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE
                    ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\
                    """)
            else:
                events_query_from.append(f"""\
                (SELECT main.session_id, {"MIN" if event_index < (valid_events_count - 1) else "MAX"}(main.timestamp) AS timestamp
                 FROM {event_from}
                 WHERE {" AND ".join(event_where)}
                 GROUP BY 1
                ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\
                """)
            event_index += 1
        if event_index > 0:
            if or_events:
                events_query_part = f"""SELECT
                                            session_id,
                                            MIN(timestamp) AS first_event_ts,
                                            MAX(timestamp) AS last_event_ts
                                        FROM ({events_joiner.join(events_query_from)}) AS u
                                        GROUP BY 1"""
            else:
                events_query_part = f"""SELECT
                                            event_0.session_id,
                                            MIN(event_0.timestamp) AS first_event_ts,
                                            MAX(event_{event_index - 1}.timestamp) AS last_event_ts
                                        FROM {events_joiner.join(events_query_from)}
                                        GROUP BY 1"""
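        # first_event_ts / last_event_ts bound the window in which the requested
        # events occurred; the issues LATERAL join built further down reuses them
        # (f.first_event_ts / f.last_event_ts) to keep only issues inside that window.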
    else:
        data.events = []
    # ---------------------------------------------------------------------------
    if data.startDate is not None:
        extra_constraints.append("s.start_ts >= %(startDate)s")
    if data.endDate is not None:
        extra_constraints.append("s.start_ts <= %(endDate)s")
    # if data.platform is not None:
    #     if data.platform == schemas.PlatformType.mobile:
    #         extra_constraints.append(b"s.user_os in ('Android','BlackBerry OS','iOS','Tizen','Windows Phone')")
    #     elif data.platform == schemas.PlatformType.desktop:
    #         extra_constraints.append(
    #             b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')")

    if errors_only:
        extra_from += f" INNER JOIN {events.EventType.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)"
        extra_constraints.append("ser.source = 'js_exception'")
        extra_constraints.append("ser.project_id = %(project_id)s")
        # if error_status != schemas.ErrorStatus.all:
        #     extra_constraints.append("ser.status = %(error_status)s")
        #     full_args["error_status"] = error_status
        # if favorite_only:
        #     extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
        #     extra_constraints.append("ufe.user_id = %(userId)s")

    if favorite_only and not errors_only and user_id is not None:
        extra_from += """INNER JOIN (SELECT user_id, session_id
                                     FROM public.user_favorite_sessions
                                     WHERE user_id = %(userId)s) AS favorite_sessions
                                    USING (session_id)"""
    elif not favorite_only and not errors_only and user_id is not None:
        extra_from += """LEFT JOIN (SELECT user_id, session_id
                                    FROM public.user_favorite_sessions
                                    WHERE user_id = %(userId)s) AS favorite_sessions
                                   USING (session_id)"""
    extra_join = ""
    if issue is not None:
        extra_join = """
            INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id)
                               WHERE issues.session_id=f.session_id
                                 AND p_issues.type=%(issue_type)s
                                 AND p_issues.context_string=%(issue_contextString)s
                                 AND timestamp >= f.first_event_ts
                                 AND timestamp <= f.last_event_ts) AS issues ON(TRUE)
            """
        full_args["issue_contextString"] = issue["contextString"]
        full_args["issue_type"] = issue["type"]
    if extra_event:
        extra_join += f"""INNER JOIN {extra_event} AS ev USING(session_id)"""
        extra_constraints.append("ev.timestamp>=%(startDate)s")
        extra_constraints.append("ev.timestamp<=%(endDate)s")
    query_part = f"""\
        FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"}
        {extra_join}
        {"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""}
        {extra_from}
        WHERE
          {" AND ".join(extra_constraints)}"""
    return full_args, query_part
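
    # Illustrative sketch of how the returned pair is consumed (hypothetical
    # caller; the real callers live elsewhere in this module):
    #
    #     full_args, query_part = ...  # result of this query builder
    #     with pg_client.PostgresClient() as cur:
    #         cur.execute(cur.mogrify(f"SELECT s.session_id {query_part} LIMIT 10;", full_args))
    #         rows = cur.fetchall()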


def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
    if project_id is None:
        all_projects = projects.get_projects(tenant_id=tenant_id)
    else:
        all_projects = [
            projects.get_project(tenant_id=tenant_id, project_id=int(project_id), include_last_session=False,
                                 include_gdpr=False)]

    all_projects = {int(p["projectId"]): p["name"] for p in all_projects}
    project_ids = list(all_projects.keys())

    available_keys = metadata.get_keys_by_projects(project_ids)
    for i in available_keys:
        available_keys[i]["user_id"] = schemas.FilterType.user_id
        available_keys[i]["user_anonymous_id"] = schemas.FilterType.user_anonymous_id
    results = {}
    for i in project_ids:
        if m_key not in available_keys[i].values():
            available_keys.pop(i)
            results[i] = {"total": 0, "sessions": [], "missingMetadata": True}
    project_ids = list(available_keys.keys())
    if len(project_ids) > 0:
        with pg_client.PostgresClient() as cur:
            sub_queries = []
            for i in project_ids:
                col_name = list(available_keys[i].keys())[list(available_keys[i].values()).index(m_key)]
                sub_queries.append(cur.mogrify(
                    f"(SELECT COALESCE(COUNT(s.*)) AS count FROM public.sessions AS s WHERE s.project_id = %(id)s AND s.{col_name} = %(value)s) AS \"{i}\"",
                    {"id": i, "value": m_value}).decode('UTF-8'))
            query = f"""SELECT {", ".join(sub_queries)};"""
            cur.execute(query=query)

            rows = cur.fetchone()

            sub_queries = []
            for i in rows.keys():
                results[i] = {"total": rows[i], "sessions": [], "missingMetadata": False,
                              "name": all_projects[int(i)]}
                if rows[i] > 0:
                    col_name = list(available_keys[int(i)].keys())[list(available_keys[int(i)].values()).index(m_key)]
                    sub_queries.append(
                        cur.mogrify(
                            f"""(
                            SELECT *
                            FROM (SELECT DISTINCT ON(favorite_sessions.session_id, s.session_id) {SESSION_PROJECTION_COLS}
                                  FROM public.sessions AS s
                                       LEFT JOIN (SELECT session_id
                                                  FROM public.user_favorite_sessions
                                                  WHERE user_favorite_sessions.user_id = %(userId)s
                                                 ) AS favorite_sessions USING (session_id)
                                  WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s
                                 ) AS full_sessions
                            ORDER BY favorite DESC, issue_score DESC
                            LIMIT 10
                            )""",
                            {"id": i, "value": m_value, "userId": user_id}).decode('UTF-8'))
            if len(sub_queries) > 0:
                cur.execute("\nUNION\n".join(sub_queries))
                rows = cur.fetchall()
                for i in rows:
                    results[str(i["project_id"])]["sessions"].append(helper.dict_to_camel_case(i))
    return results
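
    # Result shape (illustrative values): one entry per project, e.g.
    #     {42: {"total": 0, "sessions": [], "missingMetadata": True},
    #      "7": {"total": 3, "sessions": [...], "missingMetadata": False, "name": "web-app"}}
    # Projects missing m_key keep their integer key, while counted projects are
    # keyed by the string column alias returned by the count query.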


def get_user_sessions(project_id, user_id, start_date, end_date):
    with pg_client.PostgresClient() as cur:
        constraints = ["s.project_id = %(projectId)s", "s.user_id = %(userId)s"]
        if start_date is not None:
            constraints.append("s.start_ts >= %(startDate)s")
        if end_date is not None:
            constraints.append("s.start_ts <= %(endDate)s")

        query_part = f"""\
            FROM public.sessions AS s
            WHERE {" AND ".join(constraints)}"""

        cur.execute(cur.mogrify(f"""\
            SELECT s.project_id,
                   s.session_id::text AS session_id,
                   s.user_uuid,
                   s.user_id,
                   s.user_os,
                   s.user_browser,
                   s.user_device,
                   s.user_country,
                   s.start_ts,
                   s.duration,
                   s.events_count,
                   s.pages_count,
                   s.errors_count
            {query_part}
            ORDER BY s.session_id
            LIMIT 50;""", {
            "projectId": project_id,
            "userId": user_id,
            "startDate": start_date,
            "endDate": end_date
        }))

        sessions = cur.fetchall()
    return helper.list_to_camel_case(sessions)
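
    # Illustrative call (hypothetical identifiers; the date bounds use the same
    # unit as sessions.start_ts):
    #
    #     recent = get_user_sessions(project_id=1, user_id="jane@example.com",
    #                                start_date=1672531200000, end_date=1675209600000)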


def get_session_user(project_id, user_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """\
            SELECT user_id,
                   count(*) AS session_count,
                   max(start_ts) AS last_seen,
                   min(start_ts) AS first_seen
            FROM "public".sessions
            WHERE project_id = %(project_id)s
              AND user_id = %(userId)s
              AND duration IS NOT NULL
            GROUP BY user_id;
            """,
            {"project_id": project_id, "userId": user_id}
        )
        cur.execute(query=query)
        data = cur.fetchone()
    return helper.dict_to_camel_case(data)
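
    # The camelCased result looks like (illustrative values):
    #     {"userId": "jane@example.com", "sessionCount": 12,
    #      "lastSeen": 1675209600000, "firstSeen": 1672531200000}
    # When the user has no completed sessions, fetchone() yields no row
    # (assuming dict_to_camel_case passes None through unchanged).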


def count_all():
    with pg_client.PostgresClient(unlimited_query=True) as cur:
        cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
        row = cur.fetchone()
    return row.get("count", 0) if row else 0


def session_exists(project_id, session_id):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify("""SELECT 1
                               FROM public.sessions
                               WHERE session_id=%(session_id)s
                                 AND project_id=%(project_id)s
                               LIMIT 1;""",
                            {"project_id": project_id, "session_id": session_id})
        cur.execute(query)
        row = cur.fetchone()
    return row is not None
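
    # Typical guard at an API boundary (hypothetical caller):
    #
    #     if not session_exists(project_id=project_id, session_id=session_id):
    #         return {"errors": ["session not found"]}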