Merge branch 'dev' into live-se-red

commit 39e05faab4
60 changed files with 842 additions and 1345 deletions
api/Pipfile (17 lines changed)

@@ -4,20 +4,23 @@ verify_ssl = true
name = "pypi"

[packages]
sqlparse = "==0.5.2"
urllib3 = "==1.26.16"
requests = "==2.32.3"
boto3 = "==1.35.60"
pyjwt = "==2.9.0"
boto3 = "==1.35.76"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.9"
elasticsearch = "==8.16.0"
jira = "==3.8.0"
cachetools = "==5.5.0"
fastapi = "==0.115.5"
uvicorn = {extras = ["standard"], version = "==0.32.0"}
fastapi = "==0.115.6"
uvicorn = {extras = ["standard"], version = "==0.32.1"}
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.9.2"}
apscheduler = "==3.10.4"
pydantic = {extras = ["email"], version = "==2.10.3"}
apscheduler = "==3.11.0"
redis = "==5.2.0"

[dev-packages]
api/app.py (10 lines changed)

@@ -13,17 +13,16 @@ from psycopg.rows import dict_row
from starlette.responses import StreamingResponse

from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils import pg_client, ch_client
from crons import core_crons, core_dynamic_crons
from routers import core, core_dynamic
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics

loglevel = config("LOGLEVEL", default=logging.WARNING)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)


class ORPYAsyncConnection(AsyncConnection):

    def __init__(self, *args, **kwargs):

@@ -39,6 +38,7 @@ async def lifespan(app: FastAPI):

    app.schedule = AsyncIOScheduler()
    await pg_client.init()
    await ch_client.init()
    app.schedule.start()

    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:

@@ -128,3 +128,7 @@ app.include_router(usability_tests.app_apikey)
app.include_router(spot.public_app)
app.include_router(spot.app)
app.include_router(spot.app_apikey)

app.include_router(product_anaytics.public_app)
app.include_router(product_anaytics.app)
app.include_router(product_anaytics.app_apikey)
@@ -0,0 +1,10 @@
from decouple import config
import logging

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))

if config("EXP_CH_LAYER", cast=bool, default=True):
    from . import metrics_ch as metrics
    from . import metrics as metrics_legacy
else:
    from . import metrics
@@ -173,7 +173,6 @@ def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id
        schemas.MetricType.TABLE: __get_table_chart,
        schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
        schemas.MetricType.FUNNEL: __get_funnel_chart,
        schemas.MetricType.INSIGHTS: not_supported,
        schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
    }
    return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)

@@ -220,7 +219,6 @@ def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.Card
        schemas.MetricType.TIMESERIES: not_supported,
        schemas.MetricType.TABLE: not_supported,
        schemas.MetricType.HEAT_MAP: not_supported,
        schemas.MetricType.INSIGHTS: not_supported,
        schemas.MetricType.PATH_ANALYSIS: not_supported,
    }
    return supported.get(data.metric_type, not_supported)()
@ -168,7 +168,7 @@ def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1)
|
|||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
|
||||
results = {
|
||||
"value": sum([r["value"] for r in rows]),
|
||||
|
|
@ -187,7 +187,7 @@ def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1)
|
|||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
count = ch.execute(query=ch_query, params=params)
|
||||
count = ch.execute(query=ch_query, parameters=params)
|
||||
|
||||
count = count[0]["count"]
|
||||
|
||||
|
|
@ -234,7 +234,7 @@ def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.
|
|||
"endTimestamp": endTimestamp,
|
||||
"step_size": step_size,
|
||||
"status_code": status, **__get_constraint_values(args)}
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
rows = __nested_array_to_dict_array(rows)
|
||||
neutral = __get_domains_errors_neutral(rows)
|
||||
rows = __merge_rows_with_neutral(rows, neutral)
|
||||
|
|
@ -289,9 +289,9 @@ def get_errors_per_domains(project_id, limit, page, startTimestamp=TimeUTC.now(d
|
|||
ORDER BY errors_count DESC
|
||||
LIMIT %(limit)s OFFSET %(limit_s)s;"""
|
||||
logger.debug("-----------")
|
||||
logger.debug(ch.format(query=ch_query, params=params))
|
||||
logger.debug(ch.format(query=ch_query, parameters=params))
|
||||
logger.debug("-----------")
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
response = {"count": 0, "total": 0, "values": []}
|
||||
if len(rows) > 0:
|
||||
response["count"] = rows[0]["count"]
|
||||
|
|
@ -328,8 +328,7 @@ def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), e
|
|||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
# print(ch.format(query=ch_query, params=params))
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
|
||||
return __complete_missing_steps(rows=rows, start_time=startTimestamp,
|
||||
|
|
@ -416,8 +415,7 @@ def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1)
|
|||
"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
# print(ch.format(query=ch_query, params=params))
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
return helper.list_to_camel_case(__complete_missing_steps(rows=rows, start_time=startTimestamp,
|
||||
end_time=endTimestamp,
|
||||
density=density,
|
||||
|
|
@ -466,7 +464,7 @@ def __get_user_activity_avg_visited_pages(ch, project_id, startTimestamp, endTim
|
|||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
|
||||
return rows
|
||||
|
||||
|
|
@ -490,7 +488,7 @@ def __get_user_activity_avg_visited_pages_chart(ch, project_id, startTimestamp,
|
|||
WHERE count>0
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp;"""
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
rows = __complete_missing_steps(rows=rows, start_time=startTimestamp,
|
||||
end_time=endTimestamp,
|
||||
density=density, neutral={"value": 0})
|
||||
|
|
@ -519,7 +517,7 @@ def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_
|
|||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp,
|
||||
"value": value, **__get_constraint_values(args)}
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
result = rows[0]
|
||||
ch_query = f"""SELECT toUnixTimestamp(toStartOfInterval(pages.datetime, INTERVAL %(step_size)s second ))*1000 AS timestamp,
|
||||
COUNT(1) AS value
|
||||
|
|
@ -527,7 +525,8 @@ def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_
|
|||
WHERE {" AND ".join(ch_sub_query_chart)}
|
||||
GROUP BY timestamp
|
||||
ORDER BY timestamp;"""
|
||||
rows = ch.execute(query=ch_query, params={**params, **__get_constraint_values(args)})
|
||||
params = {**params, **__get_constraint_values(args)}
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
rows = __complete_missing_steps(rows=rows, start_time=startTimestamp,
|
||||
end_time=endTimestamp,
|
||||
density=density, neutral={"value": 0})
|
||||
|
|
@ -559,7 +558,7 @@ def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
|
||||
results = {
|
||||
"value": sum([r["value"] for r in rows]),
|
||||
|
|
@ -578,7 +577,7 @@ def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|||
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||
**__get_constraint_values(args)}
|
||||
|
||||
count = ch.execute(query=ch_query, params=params)
|
||||
count = ch.execute(query=ch_query, parameters=params)
|
||||
|
||||
count = count[0]["count"]
|
||||
|
||||
|
|
@ -606,9 +605,9 @@ def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-
|
|||
params = {"project_id": project_id,
|
||||
"startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
rows = ch.execute(query=ch_query, parameters=params)
|
||||
ch_query = f"""SELECT COALESCE(avgOrNull(pages.speed_index),0) AS avg
|
||||
FROM {exp_ch_helper.get_main_events_table(startTimestamp)} AS pages
|
||||
WHERE {" AND ".join(ch_sub_query)};"""
|
||||
avg = ch.execute(query=ch_query, params=params)[0]["avg"] if len(rows) > 0 else 0
|
||||
avg = ch.execute(query=ch_query, parameters=params)[0]["avg"] if len(rows) > 0 else 0
|
||||
return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.MILLISECOND}
|
||||
api/chalicelib/core/product_anaytics2.py (new file, 14 lines)

@@ -0,0 +1,14 @@
from chalicelib.utils.ch_client import ClickHouseClient


def search_events(project_id: int, data: dict):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT *
               FROM taha.events
               WHERE project_id=%(project_id)s
               ORDER BY created_at;""",
            parameters={"project_id": project_id})
        x = ch_client.execute(r)

    return x
@@ -129,13 +129,13 @@ def add_edit(tenant_id, data: schemas.WebhookSchema, replace_none=None):
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
    if data.webhook_id is not None:
        return update(tenant_id=tenant_id, webhook_id=data.webhook_id,
                      changes={"endpoint": data.endpoint.unicode_string(),
                      changes={"endpoint": data.endpoint,
                               "authHeader": data.auth_header,
                               "name": data.name},
                      replace_none=replace_none)
    else:
        return add(tenant_id=tenant_id,
                   endpoint=data.endpoint.unicode_string(),
                   endpoint=data.endpoint,
                   auth_header=data.auth_header,
                   name=data.name,
                   replace_none=replace_none)
@@ -11,3 +11,9 @@ if smtp.has_smtp():
    logger.info("valid SMTP configuration found")
else:
    logger.info("no SMTP configuration found or SMTP validation failed")

if config("EXP_CH_DRIVER", cast=bool, default=True):
    logging.info(">>> Using new CH driver")
    from . import ch_client_exp as ch_client
else:
    from . import ch_client
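For reference, a minimal sketch of how calling code stays driver-agnostic after this toggle (the query, table, and column names below are placeholders, not part of the diff):

    # both ch_client_exp and the legacy ch_client expose the same surface,
    # so callers only import whichever facade EXP_CH_DRIVER selected
    from chalicelib.utils import ch_client

    async def startup():
        await ch_client.init()  # builds the connection pool when CH_POOL is enabled

    def count_sessions(project_id: int):
        with ch_client.ClickHouseClient() as ch:
            # execute() returns a list of dicts keyed by column name with either driver
            return ch.execute(query="SELECT count(*) AS count FROM sessions WHERE project_id = %(project_id)s",
                              parameters={"project_id": project_id})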
@@ -3,15 +3,15 @@ import logging
import clickhouse_driver
from decouple import config

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logger = logging.getLogger(__name__)

settings = {}
if config('ch_timeout', cast=int, default=-1) > 0:
    logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
    logger.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
    settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}

if config('ch_receive_timeout', cast=int, default=-1) > 0:
    logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
    logger.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
    settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}


@@ -35,20 +35,20 @@ class ClickHouseClient:
    def __enter__(self):
        return self

    def execute(self, query, params=None, **args):
    def execute(self, query, parameters=None, **args):
        try:
            results = self.__client.execute(query=query, params=params, with_column_types=True, **args)
            results = self.__client.execute(query=query, params=parameters, with_column_types=True, **args)
            keys = tuple(x for x, y in results[1])
            return [dict(zip(keys, i)) for i in results[0]]
        except Exception as err:
            logging.error("--------- CH EXCEPTION -----------")
            logging.error(err)
            logging.error("--------- CH QUERY EXCEPTION -----------")
            logging.error(self.format(query=query, params=params)
                          .replace('\n', '\\n')
                          .replace(' ', ' ')
                          .replace(' ', ' '))
            logging.error("--------------------")
            logger.error("--------- CH EXCEPTION -----------")
            logger.error(err)
            logger.error("--------- CH QUERY EXCEPTION -----------")
            logger.error(self.format(query=query, parameters=parameters)
                         .replace('\n', '\\n')
                         .replace(' ', ' ')
                         .replace(' ', ' '))
            logger.error("--------------------")
            raise err

    def insert(self, query, params=None, **args):

@@ -57,10 +57,18 @@ class ClickHouseClient:
    def client(self):
        return self.__client

    def format(self, query, params):
        if params is None:
    def format(self, query, parameters):
        if parameters is None:
            return query
        return self.__client.substitute_params(query, params, self.__client.connection.context)
        return self.__client.substitute_params(query, parameters, self.__client.connection.context)

    def __exit__(self, *args):
        pass


async def init():
    logger.info(f">CH_POOL:not defined")


async def terminate():
    pass
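The keyword rename above (params → parameters) is mirrored at every metrics call site earlier in this diff. A minimal before/after sketch (the query variable is a placeholder):

    # before this commit
    rows = ch.execute(query=ch_query, params={"project_id": project_id})
    # after this commit, with either ClickHouse client
    rows = ch.execute(query=ch_query, parameters={"project_id": project_id})
    logger.debug(ch.format(query=ch_query, parameters={"project_id": project_id}))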
api/chalicelib/utils/ch_client_exp.py (new file, 179 lines)

@@ -0,0 +1,179 @@
import logging
import threading
import time
from functools import wraps
from queue import Empty, Queue  # Empty is required by the pool's non-blocking get below

import clickhouse_connect
from clickhouse_connect.driver.query import QueryContext
from decouple import config

logger = logging.getLogger(__name__)
settings = {}
if config('ch_timeout', cast=int, default=-1) > 0:
    logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
    settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}

if config('ch_receive_timeout', cast=int, default=-1) > 0:
    logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
    settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}

extra_args = {}
if config("CH_COMPRESSION", cast=bool, default=True):
    extra_args["compression"] = "lz4"


def transform_result(original_function):
    @wraps(original_function)
    def wrapper(*args, **kwargs):
        result = original_function(*args, **kwargs)
        if isinstance(result, clickhouse_connect.driver.query.QueryResult):
            column_names = result.column_names
            result = result.result_rows
            result = [dict(zip(column_names, row)) for row in result]

        return result

    return wrapper


class ClickHouseConnectionPool:
    def __init__(self, min_size, max_size, settings):
        self.min_size = min_size
        self.max_size = max_size
        self.pool = Queue()
        self.lock = threading.Lock()
        self.total_connections = 0
        self.settings = settings

        # Initialize the pool with min_size connections
        for _ in range(self.min_size):
            client = clickhouse_connect.get_client(host=config("ch_host"),
                                                   database=config("ch_database", default="default"),
                                                   user=config("ch_user", default="default"),
                                                   password=config("ch_password", default=""),
                                                   port=config("ch_port_http", cast=int),
                                                   settings=settings,
                                                   **extra_args)
            self.pool.put(client)
            self.total_connections += 1

    def get_connection(self):
        try:
            # Try to get a connection without blocking
            client = self.pool.get_nowait()
            return client
        except Empty:
            with self.lock:
                if self.total_connections < self.max_size:
                    client = clickhouse_connect.get_client(
                        host=config("ch_host"),
                        database=config("ch_database", default="default"),
                        user=config("ch_user", default="default"),
                        password=config("ch_password", default=""),
                        port=config("ch_port_http", cast=int),
                        settings=settings,
                        **extra_args
                    )
                    self.total_connections += 1
                    return client
            # If max_size reached, wait until a connection is available
            client = self.pool.get()
            return client

    def release_connection(self, client):
        self.pool.put(client)

    def close_all(self):
        with self.lock:
            while not self.pool.empty():
                client = self.pool.get()
                client.close()
            self.total_connections = 0


CH_pool: ClickHouseConnectionPool = None

RETRY_MAX = config("CH_RETRY_MAX", cast=int, default=50)
RETRY_INTERVAL = config("CH_RETRY_INTERVAL", cast=int, default=2)
RETRY = 0


def make_pool():
    if not config('CH_POOL', cast=bool, default=True):
        return
    global CH_pool
    global RETRY
    if CH_pool is not None:
        try:
            CH_pool.close_all()
        except Exception as error:
            logger.error("Error while closing all connexions to CH", error)
    try:
        CH_pool = ClickHouseConnectionPool(min_size=config("PG_MINCONN", cast=int, default=4),
                                           max_size=config("PG_MAXCONN", cast=int, default=8),
                                           settings=settings)
        if CH_pool is not None:
            logger.info("Connection pool created successfully for CH")
    except Exception as error:
        logger.error("Error while connecting to CH", error)
        if RETRY < RETRY_MAX:
            RETRY += 1
            logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
            time.sleep(RETRY_INTERVAL)
            make_pool()
        else:
            raise error


class ClickHouseClient:
    __client = None

    def __init__(self, database=None):
        if self.__client is None:
            if config('CH_POOL', cast=bool, default=True):
                self.__client = CH_pool.get_connection()
            else:
                self.__client = clickhouse_connect.get_client(host=config("ch_host"),
                                                              database=database if database else config("ch_database",
                                                                                                         default="default"),
                                                              user=config("ch_user", default="default"),
                                                              password=config("ch_password", default=""),
                                                              port=config("ch_port_http", cast=int),
                                                              settings=settings,
                                                              **extra_args)
            self.__client.execute = transform_result(self.__client.query)
            self.__client.format = self.format

    def __enter__(self):
        return self.__client

    def format(self, query, *, parameters=None):
        if parameters is None:
            return query
        return query % {
            key: f"'{value}'" if isinstance(value, str) else value
            for key, value in parameters.items()
        }

    def __exit__(self, *args):
        if config('CH_POOL', cast=bool, default=True):
            CH_pool.release_connection(self.__client)
        else:
            self.__client.close()


async def init():
    logger.info(f">CH_POOL:{config('CH_POOL', default=None)}")
    if config('CH_POOL', cast=bool, default=True):
        make_pool()


async def terminate():
    global CH_pool
    if CH_pool is not None:
        try:
            CH_pool.close_all()
            logger.info("Closed all connexions to CH")
        except Exception as error:
            logger.error("Error while closing all connexions to CH", error)
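A short usage sketch for the new clickhouse-connect based client above (the table and column names are placeholders; error handling omitted):

    from chalicelib.utils import ch_client_exp

    async def main():
        await ch_client_exp.init()  # creates the ClickHouseConnectionPool when CH_POOL is enabled
        try:
            with ch_client_exp.ClickHouseClient() as ch:
                # transform_result() wraps clickhouse_connect's query(), so rows come back
                # as a list of dicts instead of a QueryResult
                rows = ch.execute(query="SELECT project_id, count(*) AS events FROM events GROUP BY project_id")
        finally:
            await ch_client_exp.terminate()  # closes every pooled connection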
68
api/chalicelib/utils/exp_ch_helper.py
Normal file
68
api/chalicelib/utils/exp_ch_helper.py
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
from typing import Union
|
||||
|
||||
import schemas
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_main_events_table(timestamp=0, platform="web"):
|
||||
if platform == "web":
|
||||
return "experimental.events"
|
||||
else:
|
||||
return "experimental.ios_events"
|
||||
|
||||
|
||||
def get_main_sessions_table(timestamp=0):
|
||||
return "experimental.sessions"
|
||||
|
||||
|
||||
def get_user_favorite_sessions_table(timestamp=0):
|
||||
return "experimental.user_favorite_sessions"
|
||||
|
||||
|
||||
def get_user_viewed_sessions_table(timestamp=0):
|
||||
return "experimental.user_viewed_sessions"
|
||||
|
||||
|
||||
def get_user_viewed_errors_table(timestamp=0):
|
||||
return "experimental.user_viewed_errors"
|
||||
|
||||
|
||||
def get_main_js_errors_sessions_table(timestamp=0):
|
||||
return get_main_events_table(timestamp=timestamp)
|
||||
|
||||
|
||||
def get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEventType], platform="web"):
|
||||
defs = {
|
||||
schemas.EventType.CLICK: "CLICK",
|
||||
schemas.EventType.INPUT: "INPUT",
|
||||
schemas.EventType.LOCATION: "LOCATION",
|
||||
schemas.PerformanceEventType.LOCATION_DOM_COMPLETE: "LOCATION",
|
||||
schemas.PerformanceEventType.LOCATION_LARGEST_CONTENTFUL_PAINT_TIME: "LOCATION",
|
||||
schemas.PerformanceEventType.LOCATION_TTFB: "LOCATION",
|
||||
schemas.EventType.CUSTOM: "CUSTOM",
|
||||
schemas.EventType.REQUEST: "REQUEST",
|
||||
schemas.EventType.REQUEST_DETAILS: "REQUEST",
|
||||
schemas.PerformanceEventType.FETCH_FAILED: "REQUEST",
|
||||
schemas.GraphqlFilterType.GRAPHQL_NAME: "GRAPHQL",
|
||||
schemas.EventType.STATE_ACTION: "STATEACTION",
|
||||
schemas.EventType.ERROR: "ERROR",
|
||||
schemas.PerformanceEventType.LOCATION_AVG_CPU_LOAD: 'PERFORMANCE',
|
||||
schemas.PerformanceEventType.LOCATION_AVG_MEMORY_USAGE: 'PERFORMANCE',
|
||||
schemas.FetchFilterType.FETCH_URL: 'REQUEST'
|
||||
}
|
||||
defs_mobile = {
|
||||
schemas.EventType.CLICK_MOBILE: "TAP",
|
||||
schemas.EventType.INPUT_MOBILE: "INPUT",
|
||||
schemas.EventType.CUSTOM_MOBILE: "CUSTOM",
|
||||
schemas.EventType.REQUEST_MOBILE: "REQUEST",
|
||||
schemas.EventType.ERROR_MOBILE: "CRASH",
|
||||
schemas.EventType.VIEW_MOBILE: "VIEW",
|
||||
schemas.EventType.SWIPE_MOBILE: "SWIPE"
|
||||
}
|
||||
if platform != "web" and event_type in defs_mobile:
|
||||
return defs_mobile.get(event_type)
|
||||
if event_type not in defs:
|
||||
raise Exception(f"unsupported EventType:{event_type}")
|
||||
return defs.get(event_type)
|
||||
|
|
@ -8,6 +8,12 @@ assistList=/sockets-list
|
|||
CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.tar.zst
|
||||
captcha_key=
|
||||
captcha_server=
|
||||
CH_COMPRESSION=true
|
||||
ch_host=
|
||||
ch_port=
|
||||
ch_port_http=
|
||||
ch_receive_timeout=10
|
||||
ch_timeout=30
|
||||
change_password_link=/reset-password?invitation=%s&&pass=%s
|
||||
DEVTOOLS_MOB_PATTERN=%(sessionId)s/devtools.mob
|
||||
EFS_DEVTOOLS_MOB_PATTERN=%(sessionId)sdevtools
|
||||
|
|
@ -63,4 +69,6 @@ SITE_URL=
|
|||
sourcemaps_bucket=sourcemaps
|
||||
sourcemaps_reader=http://sourcemapreader-openreplay.app.svc.cluster.local:9000/sourcemaps/{}/sourcemaps
|
||||
STAGE=default-foss
|
||||
TZ=UTC
|
||||
TZ=UTC
|
||||
EXP_CH_DRIVER=true
|
||||
EXP_CH_LAYER=true
|
||||
|
|
@ -1,18 +1,20 @@
|
|||
# Keep this version to not have conflicts between requests and boto3
|
||||
urllib3==1.26.16
|
||||
requests==2.32.3
|
||||
boto3==1.35.60
|
||||
pyjwt==2.9.0
|
||||
boto3==1.35.76
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.9
|
||||
elasticsearch==8.16.0
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.5
|
||||
uvicorn[standard]==0.32.0
|
||||
fastapi==0.115.6
|
||||
uvicorn[standard]==0.32.1
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.9.2
|
||||
apscheduler==3.10.4
|
||||
pydantic[email]==2.10.3
|
||||
apscheduler==3.11.0
|
||||
|
|
|
|||
|
|
@ -1,20 +1,22 @@
|
|||
# Keep this version to not have conflicts between requests and boto3
|
||||
urllib3==1.26.16
|
||||
requests==2.32.3
|
||||
boto3==1.35.60
|
||||
pyjwt==2.9.0
|
||||
boto3==1.35.76
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.9
|
||||
elasticsearch==8.16.0
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.5
|
||||
uvicorn[standard]==0.32.0
|
||||
fastapi==0.115.6
|
||||
uvicorn[standard]==0.32.1
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.9.2
|
||||
apscheduler==3.10.4
|
||||
pydantic[email]==2.10.3
|
||||
apscheduler==3.11.0
|
||||
|
||||
redis==5.2.0
|
||||
|
|
|
|||
api/routers/subs/product_anaytics.py (new file, 17 lines)

@@ -0,0 +1,17 @@
from typing import Union

import schemas
from chalicelib.core import product_anaytics2
from fastapi import Body, Depends
from or_dependencies import OR_context
from routers.base import get_routers


public_app, app, app_apikey = get_routers()


@app.post('/{projectId}/events/search', tags=["dashboard"])
def search_events(projectId: int,
                  # data: schemas.CreateDashboardSchema = Body(...),
                  context: schemas.CurrentContext = Depends(OR_context)):
    return product_anaytics2.search_events(project_id=projectId, data={})
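For reference, a sketch of calling the new route once the API is running (host, project id, token, and any path prefix added when the routers are mounted are placeholders; the route sits on the authenticated router, so the usual JWT header applies):

    import requests

    resp = requests.post("http://localhost:8000/1/events/search",
                         headers={"Authorization": "Bearer <jwt>"},
                         json={})
    events = resp.json()  # rows returned by product_anaytics2.search_events()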
@@ -211,7 +211,8 @@ class IssueTrackingJiraSchema(IssueTrackingIntegration):

class WebhookSchema(BaseModel):
    webhook_id: Optional[int] = Field(default=None)
    endpoint: AnyHttpUrl = Field(...)
    processed_endpoint: AnyHttpUrl = Field(..., alias="endpoint")
    endpoint: Optional[str] = Field(default=None, doc_hidden=True)
    auth_header: Optional[str] = Field(default=None)
    name: str = Field(default="", max_length=100, pattern=NAME_PATTERN)


@@ -920,7 +921,6 @@ class MetricType(str, Enum):
    RETENTION = "retention"
    STICKINESS = "stickiness"
    HEAT_MAP = "heatMap"
    INSIGHTS = "insights"


class MetricOfErrors(str, Enum):

@@ -1196,31 +1196,6 @@ class CardHeatMap(__CardSchema):
        return self


class MetricOfInsights(str, Enum):
    ISSUE_CATEGORIES = "issueCategories"


class CardInsights(__CardSchema):
    metric_type: Literal[MetricType.INSIGHTS]
    metric_of: MetricOfInsights = Field(default=MetricOfInsights.ISSUE_CATEGORIES)
    view_type: MetricOtherViewType = Field(...)

    @model_validator(mode="before")
    @classmethod
    def __enforce_default(cls, values):
        values["view_type"] = MetricOtherViewType.LIST_CHART
        return values

    @model_validator(mode="after")
    def __transform(self):
        self.metric_of = MetricOfInsights(self.metric_of)
        return self

    @model_validator(mode="after")
    def restrictions(self):
        raise ValueError(f"metricType:{MetricType.INSIGHTS} not supported yet.")


class CardPathAnalysisSeriesSchema(CardSeriesSchema):
    name: Optional[str] = Field(default=None)
    filter: PathAnalysisSchema = Field(...)

@@ -1297,7 +1272,7 @@ __cards_union_base = Union[
    CardErrors,
    CardWebVital, CardHeatMap,
    CardPathAnalysis]
CardSchema = ORUnion(Union[__cards_union_base, CardInsights], discriminator='metric_type')
CardSchema = ORUnion(__cards_union_base, discriminator='metric_type')


class UpdateCardStatusSchema(BaseModel):
|
|||
|
|
@ -27,7 +27,7 @@ func (d *dataDogClient) FetchSessionData(credentials interface{}, sessionID uint
|
|||
// Not a struct, will try to parse as JSON string
|
||||
strCfg, ok := credentials.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid credentials, got: %+v", credentials)
|
||||
return nil, fmt.Errorf("invalid credentials")
|
||||
}
|
||||
cfg = datadogConfig{}
|
||||
if site, ok := strCfg["site"].(string); ok {
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ func (d *dynatraceClient) FetchSessionData(credentials interface{}, sessionID ui
|
|||
if !ok {
|
||||
strCfg, ok := credentials.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid credentials, got: %+v", credentials)
|
||||
return nil, fmt.Errorf("invalid credentials")
|
||||
}
|
||||
cfg = dynatraceConfig{}
|
||||
if val, ok := strCfg["environment"].(string); ok {
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ func (e *elasticsearchClient) FetchSessionData(credentials interface{}, sessionI
|
|||
if !ok {
|
||||
strCfg, ok := credentials.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid credentials, got: %+v", credentials)
|
||||
return nil, fmt.Errorf("invalid credentials")
|
||||
}
|
||||
cfg = elasticsearchConfig{}
|
||||
if val, ok := strCfg["url"].(string); ok {
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ func (s *sentryClient) FetchSessionData(credentials interface{}, sessionID uint6
|
|||
if !ok {
|
||||
strCfg, ok := credentials.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid credentials, got: %+v", credentials)
|
||||
return nil, fmt.Errorf("invalid credentials")
|
||||
}
|
||||
cfg = sentryConfig{}
|
||||
if val, ok := strCfg["organization_slug"].(string); ok {
|
||||
|
|
|
|||
7
ee/api/.gitignore
vendored
7
ee/api/.gitignore
vendored
|
|
@ -194,6 +194,9 @@ Pipfile.lock
|
|||
/chalicelib/core/collaboration_msteams.py
|
||||
/chalicelib/core/collaboration_slack.py
|
||||
/chalicelib/core/countries.py
|
||||
/chalicelib/core/metrics.py
|
||||
/chalicelib/core/metrics_ch.py
|
||||
/chalicelib/core/custom_metrics.py
|
||||
/chalicelib/core/custom_metrics_predefined.py
|
||||
/chalicelib/core/dashboards.py
|
||||
/chalicelib/core/errors_favorite.py
|
||||
|
|
@ -279,3 +282,7 @@ Pipfile.lock
|
|||
/chalicelib/core/unprocessed_sessions.py
|
||||
/run-db_init-dev.sh
|
||||
/.dev/
|
||||
/chalicelib/core/product_anaytics2.py
|
||||
/chalicelib/utils/ch_client.py
|
||||
/chalicelib/utils/ch_client_exp.py
|
||||
/routers/subs/product_anaytics.py
|
||||
|
|
|
|||
|
|
@ -6,20 +6,21 @@ name = "pypi"
|
|||
[packages]
|
||||
urllib3 = "==1.26.16"
|
||||
requests = "==2.32.3"
|
||||
boto3 = "==1.35.60"
|
||||
pyjwt = "==2.9.0"
|
||||
boto3 = "==1.35.76"
|
||||
pyjwt = "==2.10.1"
|
||||
psycopg2-binary = "==2.9.10"
|
||||
psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
|
||||
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
|
||||
clickhouse-connect = "==0.8.9"
|
||||
elasticsearch = "==8.16.0"
|
||||
jira = "==3.8.0"
|
||||
cachetools = "==5.5.0"
|
||||
fastapi = "==0.115.5"
|
||||
uvicorn = {extras = ["standard"], version = "==0.32.0"}
|
||||
fastapi = "==0.115.6"
|
||||
uvicorn = {extras = ["standard"], version = "==0.32.1"}
|
||||
gunicorn = "==23.0.0"
|
||||
python-decouple = "==3.8"
|
||||
pydantic = {extras = ["email"], version = "==2.9.2"}
|
||||
apscheduler = "==3.10.4"
|
||||
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
|
||||
pydantic = {extras = ["email"], version = "==2.10.3"}
|
||||
apscheduler = "==3.11.0"
|
||||
python3-saml = "==1.16.0"
|
||||
python-multipart = "==0.0.17"
|
||||
redis = "==5.2.0"
|
||||
|
|
|
|||
|
|
@ -17,11 +17,11 @@ from starlette.responses import StreamingResponse, JSONResponse
|
|||
from chalicelib.core import traces
|
||||
from chalicelib.utils import events_queue
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils import pg_client, ch_client
|
||||
from crons import core_crons, ee_crons, core_dynamic_crons
|
||||
from routers import core, core_dynamic
|
||||
from routers import ee
|
||||
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
|
||||
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
|
||||
from routers.subs import v1_api_ee
|
||||
|
||||
if config("ENABLE_SSO", cast=bool, default=True):
|
||||
|
|
@ -48,6 +48,7 @@ async def lifespan(app: FastAPI):
|
|||
app.schedule = AsyncIOScheduler()
|
||||
app.queue_system = queue.Queue()
|
||||
await pg_client.init()
|
||||
await ch_client.init()
|
||||
await events_queue.init()
|
||||
app.schedule.start()
|
||||
|
||||
|
|
@ -149,6 +150,10 @@ app.include_router(spot.public_app)
|
|||
app.include_router(spot.app)
|
||||
app.include_router(spot.app_apikey)
|
||||
|
||||
app.include_router(product_anaytics.public_app)
|
||||
app.include_router(product_anaytics.app)
|
||||
app.include_router(product_anaytics.app_apikey)
|
||||
|
||||
if config("ENABLE_SSO", cast=bool, default=True):
|
||||
app.include_router(saml.public_app)
|
||||
app.include_router(saml.app)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,10 @@ import logging
|
|||
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
|
||||
|
||||
from . import sessions as sessions_legacy
|
||||
from . import custom_metrics as custom_metrics_legacy
|
||||
from . import custom_metrics_ee as custom_metrics
|
||||
from . import metrics_ch as metrics
|
||||
from . import metrics as metrics_legacy
|
||||
|
||||
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
|
||||
logging.info(">>> Using experimental sessions search")
|
||||
|
|
|
|||
|
|
@ -1,703 +0,0 @@
|
|||
import json
|
||||
import logging
|
||||
|
||||
from decouple import config
|
||||
from fastapi import HTTPException, status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import funnels, issues, heatmaps, sessions_insights, sessions_mobs, sessions_favorite, \
|
||||
product_analytics, custom_metrics_predefined
|
||||
from chalicelib.utils import helper, pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.storage import extra
|
||||
|
||||
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
|
||||
logging.info(">>> Using experimental error search")
|
||||
from . import errors_exp as errors
|
||||
else:
|
||||
from . import errors as errors
|
||||
|
||||
if config("EXP_SESSIONS_SEARCH_METRIC", cast=bool, default=False):
|
||||
from chalicelib.core import sessions
|
||||
else:
|
||||
from chalicelib.core import sessions_legacy as sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO: refactor this to split
|
||||
# timeseries /
|
||||
# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
|
||||
# remove "table of" calls from this function
|
||||
def __try_live(project_id, data: schemas.CardSchema):
|
||||
results = []
|
||||
for i, s in enumerate(data.series):
|
||||
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
|
||||
view_type=data.view_type, metric_type=data.metric_type,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def __get_table_of_series(project_id, data: schemas.CardSchema):
|
||||
results = []
|
||||
for i, s in enumerate(data.series):
|
||||
results.append(sessions.search2_table(data=s.filter, project_id=project_id, density=data.density,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value,
|
||||
metric_format=data.metric_format))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel, user_id: int = None):
|
||||
if len(data.series) == 0:
|
||||
return {
|
||||
"stages": [],
|
||||
"totalDropDueToIssues": 0
|
||||
}
|
||||
|
||||
# return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
|
||||
# data=data.series[0].filter,
|
||||
# metric_format=data.metric_format)
|
||||
return funnels.get_simple_funnel(project=project,
|
||||
data=data.series[0].filter,
|
||||
metric_format=data.metric_format)
|
||||
|
||||
|
||||
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||
if len(data.series) == 0:
|
||||
return {
|
||||
"total": 0,
|
||||
"errors": []
|
||||
}
|
||||
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
|
||||
|
||||
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||
if len(data.series) == 0:
|
||||
logger.debug("empty series")
|
||||
return {
|
||||
"total": 0,
|
||||
"sessions": []
|
||||
}
|
||||
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
|
||||
|
||||
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
||||
include_mobs: bool = True):
|
||||
if len(data.series) == 0:
|
||||
return None
|
||||
data.series[0].filter.filters += data.series[0].filter.events
|
||||
data.series[0].filter.events = []
|
||||
return heatmaps.search_short_session(project_id=project.project_id, user_id=user_id,
|
||||
data=schemas.HeatMapSessionsSearch(
|
||||
**data.series[0].filter.model_dump()),
|
||||
include_mobs=include_mobs)
|
||||
|
||||
|
||||
# EE only
|
||||
def __get_insights_chart(project: schemas.ProjectContext, data: schemas.CardInsights, user_id: int = None):
|
||||
return sessions_insights.fetch_selected(project_id=project.project_id,
|
||||
data=schemas.GetInsightsSchema(startTimestamp=data.startTimestamp,
|
||||
endTimestamp=data.endTimestamp,
|
||||
metricValue=data.metric_value,
|
||||
series=data.series))
|
||||
|
||||
|
||||
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
|
||||
if len(data.series) == 0:
|
||||
data.series.append(
|
||||
schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
|
||||
elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
|
||||
data.series[0].filter = schemas.PathAnalysisSchema()
|
||||
|
||||
return product_analytics.path_analysis(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
|
||||
series_charts = __try_live(project_id=project.project_id, data=data)
|
||||
results = [{}] * len(series_charts[0])
|
||||
for i in range(len(results)):
|
||||
for j, series_chart in enumerate(series_charts):
|
||||
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
|
||||
data.series[j].name if data.series[j].name else j + 1: series_chart[i]["count"]}
|
||||
return results
|
||||
|
||||
|
||||
def not_supported(**args):
|
||||
raise Exception("not supported")
|
||||
|
||||
|
||||
def __get_table_of_user_ids(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_sessions(project: schemas.ProjectContext, data: schemas.CardTable, user_id):
|
||||
return __get_sessions_list(project=project, user_id=user_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_errors(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
|
||||
return __get_errors_list(project=project, user_id=user_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_issues(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_browsers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_devises(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_countries(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_urls(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_referrers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_of_requests(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||
|
||||
|
||||
def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
|
||||
supported = {
|
||||
schemas.MetricOfTable.SESSIONS: __get_table_of_sessions,
|
||||
schemas.MetricOfTable.ERRORS: __get_table_of_errors,
|
||||
schemas.MetricOfTable.USER_ID: __get_table_of_user_ids,
|
||||
schemas.MetricOfTable.ISSUES: __get_table_of_issues,
|
||||
schemas.MetricOfTable.USER_BROWSER: __get_table_of_browsers,
|
||||
schemas.MetricOfTable.USER_DEVICE: __get_table_of_devises,
|
||||
schemas.MetricOfTable.USER_COUNTRY: __get_table_of_countries,
|
||||
schemas.MetricOfTable.VISITED_URL: __get_table_of_urls,
|
||||
schemas.MetricOfTable.REFERRER: __get_table_of_referrers,
|
||||
schemas.MetricOfTable.FETCH: __get_table_of_requests
|
||||
}
|
||||
return supported.get(data.metric_of, not_supported)(project=project, data=data, user_id=user_id)
|
||||
|
||||
|
||||
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
|
||||
if data.is_predefined:
|
||||
return custom_metrics_predefined.get_metric(key=data.metric_of,
|
||||
project_id=project.project_id,
|
||||
data=data.model_dump())
|
||||
|
||||
supported = {
|
||||
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
|
||||
schemas.MetricType.TABLE: __get_table_chart,
|
||||
schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
|
||||
schemas.MetricType.FUNNEL: __get_funnel_chart,
|
||||
schemas.MetricType.INSIGHTS: __get_insights_chart,
|
||||
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
|
||||
}
|
||||
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
|
||||
|
||||
|
||||
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
# No need for this because UI is sending the full payload
|
||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
# if card is None:
|
||||
# return None
|
||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||
return None
|
||||
results = []
|
||||
for s in data.series:
|
||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||
results = []
|
||||
if len(data.series) == 0:
|
||||
return results
|
||||
for s in data.series:
|
||||
if len(data.filters) > 0:
|
||||
s.filter.filters += data.filters
|
||||
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
||||
|
||||
results.append({"seriesId": None, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
||||
if data.is_predefined:
|
||||
return not_supported()
|
||||
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
||||
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
||||
supported = {
|
||||
schemas.MetricType.TIMESERIES: not_supported,
|
||||
schemas.MetricType.TABLE: not_supported,
|
||||
schemas.MetricType.HEAT_MAP: not_supported,
|
||||
schemas.MetricType.INSIGHTS: not_supported,
|
||||
schemas.MetricType.PATH_ANALYSIS: not_supported,
|
||||
}
|
||||
return supported.get(data.metric_type, not_supported)()
|
||||
|
||||
|
||||
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
||||
r = {"start_point": [s.model_dump() for s in data.start_point],
|
||||
"start_type": data.start_type,
|
||||
"excludes": [e.model_dump() for e in data.excludes],
|
||||
"hideExcess": data.hide_excess}
|
||||
return r
|
||||
|
||||
|
||||
def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
session_data = None
|
||||
if data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if data.session_id is not None:
|
||||
session_data = {"sessionId": data.session_id}
|
||||
else:
|
||||
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
||||
data=data, include_mobs=False)
|
||||
if session_data is not None:
|
||||
session_data = {"sessionId": session_data["sessionId"]}
|
||||
|
||||
if session_data is not None:
|
||||
# for EE only
|
||||
keys = sessions_mobs. \
|
||||
__get_mob_keys(project_id=project.project_id, session_id=session_data["sessionId"])
|
||||
keys += sessions_mobs. \
|
||||
__get_mob_keys_deprecated(session_id=session_data["sessionId"]) # To support old sessions
|
||||
tag = config('RETENTION_L_VALUE', default='vault')
|
||||
for k in keys:
|
||||
try:
|
||||
extra.tag_session(file_key=k, tag_value=tag)
|
||||
except Exception as e:
|
||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
||||
logger.error(str(e))
|
||||
|
||||
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
||||
for i, s in enumerate(data.series):
|
||||
for k in s.model_dump().keys():
|
||||
_data[f"{k}_{i}"] = s.__getattribute__(k)
|
||||
_data[f"index_{i}"] = i
|
||||
_data[f"filter_{i}"] = s.filter.json()
|
||||
series_len = len(data.series)
|
||||
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
||||
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
||||
|
||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||
view_type, metric_type, metric_of, metric_value,
|
||||
metric_format, default_config, thumbnail, data,
|
||||
card_info)
|
||||
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
|
||||
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
|
||||
%(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s,
|
||||
%(card_info)s)
|
||||
RETURNING metric_id"""
|
||||
if len(data.series) > 0:
|
||||
query = f"""WITH m AS ({query})
|
||||
INSERT INTO metric_series(metric_id, index, name, filter)
|
||||
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
|
||||
for i in range(series_len)])}
|
||||
RETURNING metric_id;"""
|
||||
|
||||
query = cur.mogrify(query, params)
|
||||
cur.execute(query)
|
||||
r = cur.fetchone()
|
||||
if dashboard:
|
||||
return r["metric_id"]
|
||||
return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}
|
||||
|
||||
|
||||
def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
||||
metric: dict = get_card(metric_id=metric_id, project_id=project_id,
|
||||
user_id=user_id, flatten=False, include_data=True)
|
||||
if metric is None:
|
||||
return None
|
||||
series_ids = [r["seriesId"] for r in metric["series"]]
|
||||
n_series = []
|
||||
d_series_ids = []
|
||||
u_series = []
|
||||
u_series_ids = []
|
||||
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
|
||||
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
|
||||
"metric_type": data.metric_type, "metric_of": data.metric_of,
|
||||
"metric_value": data.metric_value, "metric_format": data.metric_format,
|
||||
"config": json.dumps(data.default_config.model_dump()), "thumbnail": data.thumbnail}
|
||||
for i, s in enumerate(data.series):
|
||||
prefix = "u_"
|
||||
if s.index is None:
|
||||
s.index = i
|
||||
if s.series_id is None or s.series_id not in series_ids:
|
||||
n_series.append({"i": i, "s": s})
|
||||
prefix = "n_"
|
||||
else:
|
||||
u_series.append({"i": i, "s": s})
|
||||
u_series_ids.append(s.series_id)
|
||||
ns = s.model_dump()
|
||||
for k in ns.keys():
|
||||
if k == "filter":
|
||||
ns[k] = json.dumps(ns[k])
|
||||
params[f"{prefix}{k}_{i}"] = ns[k]
|
||||
for i in series_ids:
|
||||
if i not in u_series_ids:
|
||||
d_series_ids.append(i)
|
||||
params["d_series_ids"] = tuple(d_series_ids)
|
||||
params["card_info"] = None
|
||||
params["session_data"] = json.dumps(metric["data"])
|
||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
||||
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if data.session_id is not None:
|
||||
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
||||
elif metric.get("data") and metric["data"].get("sessionId"):
|
||||
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
sub_queries = []
|
||||
if len(n_series) > 0:
|
||||
sub_queries.append(f"""\
|
||||
n AS (INSERT INTO metric_series (metric_id, index, name, filter)
|
||||
VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
|
||||
for s in n_series])}
|
||||
RETURNING 1)""")
|
||||
if len(u_series) > 0:
|
||||
sub_queries.append(f"""\
|
||||
u AS (UPDATE metric_series
|
||||
SET name=series.name,
|
||||
filter=series.filter,
|
||||
index=series.index
|
||||
FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
|
||||
for s in u_series])}) AS series(series_id, index, name, filter)
|
||||
WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
|
||||
RETURNING 1)""")
|
||||
if len(d_series_ids) > 0:
|
||||
sub_queries.append("""\
|
||||
d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
|
||||
RETURNING 1)""")
|
||||
query = cur.mogrify(f"""\
|
||||
{"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
|
||||
UPDATE metrics
|
||||
SET name = %(name)s, is_public= %(is_public)s,
|
||||
view_type= %(view_type)s, metric_type= %(metric_type)s,
|
||||
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
|
||||
metric_format= %(metric_format)s,
|
||||
edited_at = timezone('utc'::text, now()),
|
||||
default_config = %(config)s,
|
||||
thumbnail = %(thumbnail)s,
|
||||
card_info = %(card_info)s,
|
||||
data = %(session_data)s
|
||||
WHERE metric_id = %(metric_id)s
|
||||
AND project_id = %(project_id)s
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
RETURNING metric_id;""", params)
|
||||
cur.execute(query)
|
||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
||||
constraints = ["metrics.project_id = %(project_id)s",
|
||||
"metrics.deleted_at ISNULL"]
|
||||
params = {"project_id": project_id, "user_id": user_id,
|
||||
"offset": (data.page - 1) * data.limit,
|
||||
"limit": data.limit, }
|
||||
if data.mine_only:
|
||||
constraints.append("user_id = %(user_id)s")
|
||||
else:
|
||||
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
||||
if data.shared_only:
|
||||
constraints.append("is_public")
|
||||
|
||||
if data.query is not None and len(data.query) > 0:
|
||||
constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
|
||||
params["query"] = helper.values_for_operator(value=data.query,
|
||||
op=schemas.SearchEventOperator.CONTAINS)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
sub_join = ""
|
||||
if include_series:
|
||||
sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)"""
|
||||
query = cur.mogrify(
|
||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
||||
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
||||
dashboards, owner_email, owner_name, default_config AS config, thumbnail
|
||||
FROM metrics
|
||||
{sub_join}
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||
FROM (SELECT DISTINCT dashboard_id, name, is_public
|
||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND dashboard_widgets.metric_id = metrics.metric_id
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email, name AS owner_name
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE {" AND ".join(constraints)}
|
||||
ORDER BY created_at {data.order.value}
|
||||
LIMIT %(limit)s OFFSET %(offset)s;""", params)
|
||||
logger.debug("---------")
|
||||
logger.debug(query)
|
||||
logger.debug("---------")
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
if include_series:
|
||||
for r in rows:
|
||||
for s in r["series"]:
|
||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||
else:
|
||||
for r in rows:
|
||||
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
||||
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
return rows
|
||||
|
||||
|
||||
def get_all(project_id, user_id):
|
||||
default_search = schemas.SearchCardsSchema()
|
||||
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||
result = rows
|
||||
while len(rows) == default_search.limit:
|
||||
default_search.page += 1
|
||||
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||
result += rows
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def delete_card(project_id, metric_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
UPDATE public.metrics
|
||||
SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
|
||||
WHERE project_id = %(project_id)s
|
||||
AND metric_id = %(metric_id)s
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
RETURNING data;""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
||||
)
|
||||
# for EE only
|
||||
row = cur.fetchone()
|
||||
if row:
|
||||
if row["data"] and not sessions_favorite.favorite_session_exists(session_id=row["data"]["sessionId"]):
|
||||
keys = sessions_mobs. \
|
||||
__get_mob_keys(project_id=project_id, session_id=row["data"]["sessionId"])
|
||||
keys += sessions_mobs. \
|
||||
__get_mob_keys_deprecated(session_id=row["data"]["sessionId"]) # To support old sessions
|
||||
tag = config('RETENTION_D_VALUE', default='default')
|
||||
for k in keys:
|
||||
try:
|
||||
extra.tag_session(file_key=k, tag_value=tag)
|
||||
except Exception as e:
|
||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
||||
logger.error(str(e))
|
||||
return {"state": "success"}
|
||||
|
||||
|
||||
def __get_path_analysis_attributes(row):
|
||||
card_info = row.pop("cardInfo")
|
||||
row["excludes"] = card_info.get("excludes", [])
|
||||
row["startPoint"] = card_info.get("startPoint", [])
|
||||
row["startType"] = card_info.get("startType", "start")
|
||||
row["hideExcess"] = card_info.get("hideExcess", False)
|
||||
return row
|
||||
|
||||
|
||||
def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data: bool = False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, deleted_at, edited_at, metric_type,
|
||||
view_type, metric_of, metric_value, metric_format, is_pinned, default_config,
|
||||
default_config AS config,series, dashboards, owner_email, card_info
|
||||
{',data' if include_data else ''}
|
||||
FROM metrics
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||
FROM metric_series
|
||||
WHERE metric_series.metric_id = metrics.metric_id
|
||||
AND metric_series.deleted_at ISNULL
|
||||
) AS metric_series ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||
FROM (SELECT dashboard_id, name, is_public
|
||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||
AND metric_id = %(metric_id)s) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE metrics.project_id = %(project_id)s
|
||||
AND metrics.deleted_at ISNULL
|
||||
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
||||
AND metrics.metric_id = %(metric_id)s
|
||||
ORDER BY created_at;""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||
row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
|
||||
if flatten:
|
||||
for s in row["series"]:
|
||||
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||
row = helper.dict_to_camel_case(row)
|
||||
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
|
||||
row = __get_path_analysis_attributes(row=row)
|
||||
return row
|
||||
|
||||
|
||||
def get_series_for_alert(project_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT series_id AS value,
|
||||
metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
|
||||
'count' AS unit,
|
||||
FALSE AS predefined,
|
||||
metric_id,
|
||||
series_id
|
||||
FROM metric_series
|
||||
INNER JOIN metrics USING (metric_id)
|
||||
WHERE metrics.deleted_at ISNULL
|
||||
AND metrics.project_id = %(project_id)s
|
||||
AND metrics.metric_type = 'timeseries'
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
ORDER BY name;""",
|
||||
{"project_id": project_id, "user_id": user_id}
|
||||
)
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
return helper.list_to_camel_case(rows)
|
||||
|
||||
|
||||
def change_state(project_id, metric_id, user_id, status):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
UPDATE public.metrics
|
||||
SET active = %(status)s
|
||||
WHERE metric_id = %(metric_id)s
|
||||
AND (user_id = %(user_id)s OR is_public);""",
|
||||
{"metric_id": metric_id, "status": status, "user_id": user_id})
|
||||
)
|
||||
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||
|
||||
|
||||
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
||||
data: schemas.CardSessionsSchema
|
||||
# , range_value=None, start_date=None, end_date=None
|
||||
):
|
||||
# No need for this because UI is sending the full payload
|
||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
# if card is None:
|
||||
# return None
|
||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
# if metric is None:
|
||||
# return None
|
||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||
return None
|
||||
for s in data.series:
|
||||
s.filter.startTimestamp = data.startTimestamp
|
||||
s.filter.endTimestamp = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
|
||||
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
|
||||
issue = None
|
||||
for i in issues_list:
|
||||
if i.get("issueId", "") == issue_id:
|
||||
issue = i
|
||||
break
|
||||
if issue is None:
|
||||
issue = issues.get(project_id=project_id, issue_id=issue_id)
|
||||
if issue is not None:
|
||||
issue = {**issue,
|
||||
"affectedSessions": 0,
|
||||
"affectedUsers": 0,
|
||||
"conversionImpact": 0,
|
||||
"lostConversions": 0,
|
||||
"unaffectedSessions": 0}
|
||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
|
||||
issue=issue, data=s.filter)
|
||||
if issue is not None else {"total": 0, "sessions": []},
|
||||
"issue": issue}
|
||||
|
||||
|
||||
def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
raw_metric: dict = get_card(metric_id=metric_id, project_id=project.project_id, user_id=user_id, include_data=True)
|
||||
|
||||
if raw_metric is None:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
|
||||
raw_metric["startTimestamp"] = data.startTimestamp
|
||||
raw_metric["endTimestamp"] = data.endTimestamp
|
||||
raw_metric["limit"] = data.limit
|
||||
raw_metric["density"] = data.density
|
||||
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
||||
|
||||
if metric.is_predefined:
|
||||
return custom_metrics_predefined.get_metric(key=metric.metric_of,
|
||||
project_id=project.project_id,
|
||||
data=data.model_dump())
|
||||
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
||||
return heatmaps.get_selected_session(project_id=project.project_id,
|
||||
session_id=raw_metric["data"]["sessionId"])
|
||||
else:
|
||||
return heatmaps.search_short_session(project_id=project.project_id,
|
||||
data=schemas.HeatMapSessionsSearch(**metric.model_dump()),
|
||||
user_id=user_id)
|
||||
|
||||
return get_chart(project=project, data=metric, user_id=user_id)
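
# Illustrative sketch: the ids and time window here are assumptions, not values
# from the commit; make_chart_from_card dispatches to predefined metrics, heat
# maps, or get_chart depending on the stored card's type.
example_chart = make_chart_from_card(
    project=project,  # assumed: an existing schemas.ProjectContext instance
    user_id=7,
    metric_id=42,
    data=schemas.CardSessionsSchema(startTimestamp=TimeUTC.now(-7),
                                    endTimestamp=TimeUTC.now()),
)
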
|
||||
|
||||
|
||||
def card_exists(metric_id, project_id, user_id) -> bool:
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT 1
|
||||
FROM metrics
|
||||
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||
FROM (SELECT dashboard_id, name, is_public
|
||||
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||
WHERE deleted_at ISNULL
|
||||
AND project_id = %(project_id)s
|
||||
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||
AND metric_id = %(metric_id)s) AS connected_dashboards
|
||||
) AS connected_dashboards ON (TRUE)
|
||||
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||
FROM users
|
||||
WHERE deleted_at ISNULL
|
||||
AND users.user_id = metrics.user_id
|
||||
) AS owner ON (TRUE)
|
||||
WHERE metrics.project_id = %(project_id)s
|
||||
AND metrics.deleted_at ISNULL
|
||||
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
||||
AND metrics.metric_id = %(metric_id)s
|
||||
ORDER BY created_at;""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row is not None
|
||||
234
ee/api/chalicelib/core/custom_metrics_ee.py
Normal file
234
ee/api/chalicelib/core/custom_metrics_ee.py
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
import json
|
||||
import logging
|
||||
|
||||
from decouple import config
|
||||
from fastapi import HTTPException, status
|
||||
from .custom_metrics import *
|
||||
import schemas
|
||||
from chalicelib.core import funnels, issues, heatmaps, sessions_mobs, sessions_favorite, \
|
||||
product_analytics, custom_metrics_predefined
|
||||
from chalicelib.utils import helper, pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.storage import extra
|
||||
|
||||
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
|
||||
logging.info(">>> Using experimental error search")
|
||||
from . import errors_exp as errors
|
||||
else:
|
||||
from . import errors as errors
|
||||
|
||||
if config("EXP_SESSIONS_SEARCH_METRIC", cast=bool, default=False):
|
||||
from chalicelib.core import sessions
|
||||
else:
|
||||
from chalicelib.core import sessions_legacy as sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO: refactor this to split
|
||||
# timeseries /
|
||||
# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
|
||||
# remove "table of" calls from this function
|
||||
def __try_live(project_id, data: schemas.CardSchema):
|
||||
results = []
|
||||
for i, s in enumerate(data.series):
|
||||
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
|
||||
view_type=data.view_type, metric_type=data.metric_type,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def __get_table_of_series(project_id, data: schemas.CardSchema):
|
||||
results = []
|
||||
for i, s in enumerate(data.series):
|
||||
results.append(sessions.search2_table(data=s.filter, project_id=project_id, density=data.density,
|
||||
metric_of=data.metric_of, metric_value=data.metric_value,
|
||||
metric_format=data.metric_format))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||
if len(data.series) == 0:
|
||||
return {
|
||||
"total": 0,
|
||||
"errors": []
|
||||
}
|
||||
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
|
||||
|
||||
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||
if len(data.series) == 0:
|
||||
logger.debug("empty series")
|
||||
return {
|
||||
"total": 0,
|
||||
"sessions": []
|
||||
}
|
||||
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||
|
||||
|
||||
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||
# No need for this because UI is sending the full payload
|
||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
# if card is None:
|
||||
# return None
|
||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||
return None
|
||||
results = []
|
||||
for s in data.series:
|
||||
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||
results = []
|
||||
if len(data.series) == 0:
|
||||
return results
|
||||
for s in data.series:
|
||||
if len(data.filters) > 0:
|
||||
s.filter.filters += data.filters
|
||||
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
||||
|
||||
results.append({"seriesId": None, "seriesName": s.name,
|
||||
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
session_data = None
|
||||
if data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||
if data.session_id is not None:
|
||||
session_data = {"sessionId": data.session_id}
|
||||
else:
|
||||
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
||||
data=data, include_mobs=False)
|
||||
if session_data is not None:
|
||||
session_data = {"sessionId": session_data["sessionId"]}
|
||||
|
||||
if session_data is not None:
|
||||
# for EE only
|
||||
keys = sessions_mobs. \
|
||||
__get_mob_keys(project_id=project.project_id, session_id=session_data["sessionId"])
|
||||
keys += sessions_mobs. \
|
||||
__get_mob_keys_deprecated(session_id=session_data["sessionId"]) # To support old sessions
|
||||
tag = config('RETENTION_L_VALUE', default='vault')
|
||||
for k in keys:
|
||||
try:
|
||||
extra.tag_session(file_key=k, tag_value=tag)
|
||||
except Exception as e:
|
||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
||||
logger.error(str(e))
|
||||
|
||||
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
||||
for i, s in enumerate(data.series):
|
||||
for k in s.model_dump().keys():
|
||||
_data[f"{k}_{i}"] = s.__getattribute__(k)
|
||||
_data[f"index_{i}"] = i
|
||||
_data[f"filter_{i}"] = s.filter.json()
|
||||
series_len = len(data.series)
|
||||
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
||||
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
||||
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
||||
|
||||
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||
view_type, metric_type, metric_of, metric_value,
|
||||
metric_format, default_config, thumbnail, data,
|
||||
card_info)
|
||||
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
|
||||
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
|
||||
%(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s,
|
||||
%(card_info)s)
|
||||
RETURNING metric_id"""
|
||||
if len(data.series) > 0:
|
||||
query = f"""WITH m AS ({query})
|
||||
INSERT INTO metric_series(metric_id, index, name, filter)
|
||||
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
|
||||
for i in range(series_len)])}
|
||||
RETURNING metric_id;"""
|
||||
|
||||
query = cur.mogrify(query, params)
|
||||
cur.execute(query)
|
||||
r = cur.fetchone()
|
||||
if dashboard:
|
||||
return r["metric_id"]
|
||||
return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}
|
||||
|
||||
|
||||
def delete_card(project_id, metric_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
UPDATE public.metrics
|
||||
SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
|
||||
WHERE project_id = %(project_id)s
|
||||
AND metric_id = %(metric_id)s
|
||||
AND (user_id = %(user_id)s OR is_public)
|
||||
RETURNING data;""",
|
||||
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
||||
)
|
||||
# for EE only
|
||||
row = cur.fetchone()
|
||||
if row:
|
||||
if row["data"] and not sessions_favorite.favorite_session_exists(session_id=row["data"]["sessionId"]):
|
||||
keys = sessions_mobs. \
|
||||
__get_mob_keys(project_id=project_id, session_id=row["data"]["sessionId"])
|
||||
keys += sessions_mobs. \
|
||||
__get_mob_keys_deprecated(session_id=row["data"]["sessionId"]) # To support old sessions
|
||||
tag = config('RETENTION_D_VALUE', default='default')
|
||||
for k in keys:
|
||||
try:
|
||||
extra.tag_session(file_key=k, tag_value=tag)
|
||||
except Exception as e:
|
||||
logger.warning(f"!!!Error while tagging: {k} to {tag} for heatMap")
|
||||
logger.error(str(e))
|
||||
return {"state": "success"}
|
||||
|
||||
|
||||
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
||||
data: schemas.CardSessionsSchema
|
||||
# , range_value=None, start_date=None, end_date=None
|
||||
):
|
||||
# No need for this because UI is sending the full payload
|
||||
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||
# if card is None:
|
||||
# return None
|
||||
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||
# if metric is None:
|
||||
# return None
|
||||
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||
return None
|
||||
for s in data.series:
|
||||
s.filter.startTimestamp = data.startTimestamp
|
||||
s.filter.endTimestamp = data.endTimestamp
|
||||
s.filter.limit = data.limit
|
||||
s.filter.page = data.page
|
||||
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
|
||||
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
|
||||
issue = None
|
||||
for i in issues_list:
|
||||
if i.get("issueId", "") == issue_id:
|
||||
issue = i
|
||||
break
|
||||
if issue is None:
|
||||
issue = issues.get(project_id=project_id, issue_id=issue_id)
|
||||
if issue is not None:
|
||||
issue = {**issue,
|
||||
"affectedSessions": 0,
|
||||
"affectedUsers": 0,
|
||||
"conversionImpact": 0,
|
||||
"lostConversions": 0,
|
||||
"unaffectedSessions": 0}
|
||||
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
|
||||
issue=issue, data=s.filter)
|
||||
if issue is not None else {"total": 0, "sessions": []},
|
||||
"issue": issue}
|
||||
|
|
@ -182,3 +182,20 @@ def delete(tenant_id, user_id, role_id):
|
|||
{"tenant_id": tenant_id, "role_id": role_id})
|
||||
cur.execute(query=query)
|
||||
return get_roles(tenant_id=tenant_id)
|
||||
|
||||
|
||||
def get_role(tenant_id, role_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""SELECT roles.*
|
||||
FROM public.roles
|
||||
WHERE tenant_id =%(tenant_id)s
|
||||
AND deleted_at IS NULL
|
||||
AND not service_role
|
||||
AND role_id = %(role_id)s
|
||||
LIMIT 1;""",
|
||||
{"tenant_id": tenant_id, "role_id": role_id})
|
||||
cur.execute(query=query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||
return helper.dict_to_camel_case(row)
|
||||
|
|
|
|||
|
|
@ -1,467 +0,0 @@
|
|||
from typing import Optional
|
||||
import logging
|
||||
import schemas
|
||||
from chalicelib.core import metrics
|
||||
from chalicelib.core import sessions_exp
|
||||
from chalicelib.utils import ch_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _table_slice(table, index):
|
||||
col = list()
|
||||
for row in table:
|
||||
col.append(row[index])
|
||||
return col
|
||||
|
||||
|
||||
def _table_where(table, index, value):
|
||||
new_table = list()
|
||||
for row in table:
|
||||
if row[index] == value:
|
||||
new_table.append(row)
|
||||
return new_table
|
||||
|
||||
|
||||
def _sum_table_index(table, index):
|
||||
s = 0
|
||||
count = 0
|
||||
for row in table:
|
||||
v = row[index]
|
||||
if v is None:
|
||||
continue
|
||||
s += v
|
||||
count += 1
|
||||
return s
|
||||
|
||||
|
||||
def _mean_table_index(table, index):
|
||||
s = _sum_table_index(table, index)
|
||||
c = len(table)
|
||||
return s / c if c else 0  # guard against an empty period slice
|
||||
|
||||
|
||||
def _sort_table_index(table, index, reverse=False):
|
||||
return sorted(table, key=lambda k: k[index], reverse=reverse)
|
||||
|
||||
|
||||
def _select_rec(l, selector):
|
||||
if len(selector) == 1:
|
||||
return l[selector[0]]
|
||||
else:
|
||||
s = selector[0]
|
||||
L = l[s]
|
||||
type_ = type(s)
|
||||
if type_ == slice:
|
||||
return [_select_rec(l_, selector[1:]) for l_ in L]
|
||||
elif type_ == int:
|
||||
return [_select_rec(L, selector[1:])]
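
# Illustrative sketch with made-up data, showing how _select_rec consumes a
# mixed selector of slices and ints (a slice fans out over rows, an int keeps
# the result wrapped in a list).
example_table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert _select_rec(example_table, [slice(0, 2), 1]) == [2, 5]
assert _select_rec(example_table, [0, 2]) == [3]
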
|
||||
|
||||
|
||||
def __get_two_values(response, time_index='hh', name_index='name'):
|
||||
columns = list(response[0].keys())
|
||||
name_index_val = columns.index(name_index)
|
||||
time_index_value = columns.index(time_index)
|
||||
|
||||
table = [list(r.values()) for r in response]
|
||||
table_hh1 = list()
|
||||
table_hh2 = list()
|
||||
hh_vals = list()
|
||||
names_hh1 = list()
|
||||
names_hh2 = list()
|
||||
for e in table:
|
||||
if e[time_index_value] not in hh_vals and len(hh_vals) == 2:
|
||||
break
|
||||
elif e[time_index_value] not in hh_vals:
|
||||
hh_vals.append(e[time_index_value])
|
||||
|
||||
if len(hh_vals) == 1:
|
||||
table_hh1.append(e)
|
||||
if e[name_index_val] not in names_hh1:
|
||||
names_hh1.append(e[name_index_val])
|
||||
elif len(hh_vals) == 2:
|
||||
table_hh2.append(e)
|
||||
if e[name_index_val] not in names_hh2:
|
||||
names_hh2.append(e[name_index_val])
|
||||
return table_hh1, table_hh2, columns, names_hh1, names_hh2
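
# Illustrative sketch with invented rows (newest period first), showing how
# __get_two_values splits a result set into the two most recent periods and
# collects the distinct names seen in each.
example_rows = [
    {"hh": "2024-01-02 00:00:00", "sessions": 4, "name": "a"},
    {"hh": "2024-01-02 00:00:00", "sessions": 2, "name": "b"},
    {"hh": "2024-01-01 00:00:00", "sessions": 7, "name": "a"},
]
t1, t2, cols, names1, names2 = __get_two_values(example_rows, time_index="hh", name_index="name")
# t1 -> rows of the newest period, t2 -> rows of the previous one,
# names1 == ["a", "b"], names2 == ["a"]
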
|
||||
|
||||
|
||||
def query_requests_by_period(project_id, start_time, end_time, filters: Optional[schemas.SessionsSearchPayloadSchema]):
|
||||
params = {
|
||||
"project_id": project_id, "startTimestamp": start_time, "endTimestamp": end_time,
|
||||
"step_size": metrics.__get_step_size(endTimestamp=end_time, startTimestamp=start_time, density=3)
|
||||
}
|
||||
params, sub_query = __filter_subquery(project_id=project_id, filters=filters, params=params)
|
||||
conditions = ["event_type = 'REQUEST'"]
|
||||
query = f"""WITH toUInt32(toStartOfInterval(toDateTime(%(startTimestamp)s/1000), INTERVAL %(step_size)s second)) AS start,
|
||||
toUInt32(toStartOfInterval(toDateTime(%(endTimestamp)s/1000), INTERVAL %(step_size)s second)) AS end
|
||||
SELECT T1.hh, countIf(T2.session_id != 0) as sessions, avg(T2.success) as success_rate, T2.url_host as names,
|
||||
T2.url_path as source, avg(T2.duration) as avg_duration
|
||||
FROM (SELECT arrayJoin(arrayMap(x -> toDateTime(x), range(start, end, %(step_size)s))) as hh) AS T1
|
||||
LEFT JOIN (SELECT session_id, url_host, url_path, success, message, duration, toStartOfInterval(datetime, INTERVAL %(step_size)s second) as dtime
|
||||
FROM experimental.events
|
||||
{sub_query}
|
||||
WHERE project_id = {project_id}
|
||||
AND {" AND ".join(conditions)}) AS T2 ON T2.dtime = T1.hh
|
||||
GROUP BY T1.hh, T2.url_host, T2.url_path
|
||||
ORDER BY T1.hh DESC;"""
|
||||
with ch_client.ClickHouseClient() as conn:
|
||||
query = conn.format(query=query, params=params)
|
||||
logging.debug("--------")
|
||||
logging.debug(query)
|
||||
logging.debug("--------")
|
||||
res = conn.execute(query=query)
|
||||
if res is None or sum([r.get("sessions") for r in res]) == 0:
|
||||
return []
|
||||
|
||||
table_hh1, table_hh2, columns, this_period_hosts, last_period_hosts = __get_two_values(res, time_index='hh',
|
||||
name_index='source')
|
||||
test = [k[4] for k in table_hh1]
|
||||
del res
|
||||
|
||||
new_hosts = [x for x in this_period_hosts if x not in last_period_hosts]
|
||||
common_names = [x for x in this_period_hosts if x not in new_hosts]
|
||||
|
||||
source_idx = columns.index('source')
|
||||
duration_idx = columns.index('avg_duration')
|
||||
# success_idx = columns.index('success_rate')
|
||||
# delta_duration = dict()
|
||||
# delta_success = dict()
|
||||
new_duration_values = dict()
|
||||
duration_values = dict()
|
||||
for n in common_names:
|
||||
d1_tmp = _table_where(table_hh1, source_idx, n)
|
||||
d2_tmp = _table_where(table_hh2, source_idx, n)
|
||||
old_duration = _mean_table_index(d2_tmp, duration_idx)
|
||||
new_duration = _mean_table_index(d1_tmp, duration_idx)
|
||||
if old_duration == 0:
|
||||
continue
|
||||
duration_values[n] = new_duration, old_duration, (new_duration - old_duration) / old_duration
|
||||
# delta_duration[n] = (_mean_table_index(d1_tmp, duration_idx) - _duration1) / _duration1
|
||||
# delta_success[n] = _mean_table_index(d1_tmp, success_idx) - _mean_table_index(d2_tmp, success_idx)
|
||||
for n in new_hosts:
|
||||
d1_tmp = _table_where(table_hh1, source_idx, n)
|
||||
new_duration_values[n] = _mean_table_index(d1_tmp, duration_idx)
|
||||
|
||||
# names_idx = columns.index('names')
|
||||
total = _sum_table_index(table_hh1, duration_idx)
|
||||
d1_tmp = _sort_table_index(table_hh1, duration_idx, reverse=True)
|
||||
_tmp = _table_slice(d1_tmp, duration_idx)
|
||||
_tmp2 = _table_slice(d1_tmp, source_idx)
|
||||
|
||||
increase = sorted(duration_values.items(), key=lambda k: k[1][-1], reverse=True)
|
||||
ratio = sorted(zip(_tmp2, _tmp), key=lambda k: k[1], reverse=True)
|
||||
# names_ = set([k[0] for k in increase[:3]+ratio[:3]]+new_hosts[:3])
|
||||
names_ = set([k[0] for k in increase[:3] + ratio[:3]])  # we took out new hosts since they don't give much info
|
||||
|
||||
results = list()
|
||||
for n in names_:
|
||||
if n is None:
|
||||
continue
|
||||
data_ = {'category': schemas.InsightCategories.NETWORK, 'name': n,
|
||||
'value': None, 'oldValue': None, 'ratio': None, 'change': None, 'isNew': True}
|
||||
for n_, v in ratio:
|
||||
if n == n_:
|
||||
if n in new_hosts:
|
||||
data_['value'] = new_duration_values[n]
|
||||
data_['ratio'] = 100 * v / total
|
||||
break
|
||||
for n_, v in increase:
|
||||
if n == n_:
|
||||
data_['value'] = v[0]
|
||||
data_['oldValue'] = v[1]
|
||||
data_['change'] = 100 * v[2]
|
||||
data_['isNew'] = False
|
||||
break
|
||||
results.append(data_)
|
||||
return results
|
||||
|
||||
|
||||
def __filter_subquery(project_id: int, filters: Optional[schemas.SessionsSearchPayloadSchema], params: dict):
|
||||
sub_query = ""
|
||||
if filters and (len(filters.events) > 0 or len(filters.filters) > 0):
|
||||
qp_params, sub_query = sessions_exp.search_query_parts_ch(data=filters, project_id=project_id,
|
||||
error_status=None,
|
||||
errors_only=True, favorite_only=None,
|
||||
issue=None, user_id=None)
|
||||
params = {**params, **qp_params}
|
||||
# TODO: test if this line impacts other cards beside insights
|
||||
# sub_query = f"INNER JOIN {sub_query} USING(session_id)"
|
||||
return params, sub_query
|
||||
|
||||
|
||||
def query_most_errors_by_period(project_id, start_time, end_time,
|
||||
filters: Optional[schemas.SessionsSearchPayloadSchema]):
|
||||
params = {
|
||||
"project_id": project_id, "startTimestamp": start_time, "endTimestamp": end_time,
|
||||
"step_size": metrics.__get_step_size(endTimestamp=end_time, startTimestamp=start_time, density=3)
|
||||
}
|
||||
params, sub_query = __filter_subquery(project_id=project_id, filters=filters, params=params)
|
||||
conditions = ["event_type = 'ERROR'"]
|
||||
query = f"""WITH toUInt32(toStartOfInterval(toDateTime(%(startTimestamp)s/1000), INTERVAL %(step_size)s second)) AS start,
|
||||
toUInt32(toStartOfInterval(toDateTime(%(endTimestamp)s/1000), INTERVAL %(step_size)s second)) AS end
|
||||
SELECT T1.hh, countIf(T2.session_id != 0) as sessions, T2.message_name as names,
|
||||
groupUniqArray(T2.source) as sources
|
||||
FROM (SELECT arrayJoin(arrayMap(x -> toDateTime(x), range(start, end, %(step_size)s))) as hh) AS T1
|
||||
LEFT JOIN (SELECT session_id, concat(name,': ', message) as message_name, source, toStartOfInterval(datetime, INTERVAL %(step_size)s second) as dtime
|
||||
FROM experimental.events
|
||||
{sub_query}
|
||||
WHERE project_id = {project_id}
|
||||
AND datetime >= toDateTime(%(startTimestamp)s/1000)
|
||||
AND datetime < toDateTime(%(endTimestamp)s/1000)
|
||||
AND {" AND ".join(conditions)}) AS T2 ON T2.dtime = T1.hh
|
||||
GROUP BY T1.hh, T2.message_name
|
||||
ORDER BY T1.hh DESC;"""
|
||||
|
||||
with ch_client.ClickHouseClient() as conn:
|
||||
query = conn.format(query=query, params=params)
|
||||
logging.debug("--------")
|
||||
logging.debug(query)
|
||||
logging.debug("--------")
|
||||
res = conn.execute(query=query)
|
||||
if res is None or sum([r.get("sessions") for r in res]) == 0:
|
||||
return []
|
||||
|
||||
table_hh1, table_hh2, columns, this_period_errors, last_period_errors = __get_two_values(res, time_index='hh',
|
||||
name_index='names')
|
||||
del res
|
||||
new_errors = [x for x in this_period_errors if x not in last_period_errors]
|
||||
common_errors = [x for x in this_period_errors if x not in new_errors]
|
||||
|
||||
sessions_idx = columns.index('sessions')
|
||||
names_idx = columns.index('names')
|
||||
|
||||
percentage_errors = dict()
|
||||
total = _sum_table_index(table_hh1, sessions_idx)
|
||||
# error_increase = dict()
|
||||
new_error_values = dict()
|
||||
error_values = dict()
|
||||
for n in this_period_errors:
|
||||
if n is None:
|
||||
continue
|
||||
percentage_errors[n] = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
|
||||
new_error_values[n] = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
|
||||
for n in common_errors:
|
||||
if n is None:
|
||||
continue
|
||||
sum_old_errors = _sum_table_index(_table_where(table_hh2, names_idx, n), sessions_idx)
|
||||
if sum_old_errors == 0:
|
||||
continue
|
||||
sum_new_errors = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
|
||||
# error_increase[n] = (new_errors - old_errors) / old_errors
|
||||
error_values[n] = sum_new_errors, sum_old_errors, (sum_new_errors - sum_old_errors) / sum_old_errors
|
||||
ratio = sorted(percentage_errors.items(), key=lambda k: k[1], reverse=True)
|
||||
increase = sorted(error_values.items(), key=lambda k: k[1][-1], reverse=True)
|
||||
names_ = set([k[0] for k in increase[:3] + ratio[:3]] + new_errors[:3])
|
||||
|
||||
results = list()
|
||||
for n in names_:
|
||||
if n is None:
|
||||
continue
|
||||
data_ = {'category': schemas.InsightCategories.ERRORS, 'name': n,
|
||||
'value': None, 'oldValue': None, 'ratio': None, 'change': None, 'isNew': True}
|
||||
for n_, v in ratio:
|
||||
if n == n_:
|
||||
if n in new_errors:
|
||||
data_['value'] = new_error_values[n]
|
||||
data_['ratio'] = 100 * v / total
|
||||
break
|
||||
for n_, v in increase:
|
||||
if n == n_:
|
||||
data_['value'] = v[0]
|
||||
data_['oldValue'] = v[1]
|
||||
data_['change'] = 100 * v[2]
|
||||
data_['isNew'] = False
|
||||
break
|
||||
results.append(data_)
|
||||
return results
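
# Illustrative (made-up) shape of one entry returned above, for an error that is
# new this period; errors seen in both periods get "oldValue"/"change" filled
# from the increase ranking instead.
example_error_insight = {
    "category": schemas.InsightCategories.ERRORS,
    "name": "TypeError: x is undefined",
    "value": 42,       # sessions affected in the current period
    "oldValue": None,  # the error did not appear in the previous period
    "ratio": 8.4,      # 100 * 42 / total sessions counted this period (total == 500 here)
    "change": None,
    "isNew": True,
}
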
|
||||
|
||||
|
||||
def query_cpu_memory_by_period(project_id, start_time, end_time,
|
||||
filters: Optional[schemas.SessionsSearchPayloadSchema]):
|
||||
params = {
|
||||
"project_id": project_id, "startTimestamp": start_time, "endTimestamp": end_time,
|
||||
"step_size": metrics.__get_step_size(endTimestamp=end_time, startTimestamp=start_time, density=3)
|
||||
}
|
||||
params, sub_query = __filter_subquery(project_id=project_id, filters=filters, params=params)
|
||||
conditions = ["event_type = 'PERFORMANCE'"]
|
||||
query = f"""WITH toUInt32(toStartOfInterval(toDateTime(%(startTimestamp)s/1000), INTERVAL %(step_size)s second)) AS start,
|
||||
toUInt32(toStartOfInterval(toDateTime(%(endTimestamp)s/1000), INTERVAL %(step_size)s second)) AS end
|
||||
SELECT T1.hh, countIf(T2.session_id != 0) as sessions, avg(T2.avg_cpu) as cpu_used,
|
||||
avg(T2.avg_used_js_heap_size) as memory_used, T2.url_host as names, groupUniqArray(T2.url_path) as sources
|
||||
FROM (SELECT arrayJoin(arrayMap(x -> toDateTime(x), range(start, end, %(step_size)s))) as hh) AS T1
|
||||
LEFT JOIN (SELECT session_id, url_host, url_path, avg_used_js_heap_size, avg_cpu, toStartOfInterval(datetime, INTERVAL %(step_size)s second) as dtime
|
||||
FROM experimental.events
|
||||
{sub_query}
|
||||
WHERE project_id = {project_id}
|
||||
AND {" AND ".join(conditions)}) AS T2 ON T2.dtime = T1.hh
|
||||
GROUP BY T1.hh, T2.url_host
|
||||
ORDER BY T1.hh DESC;"""
|
||||
with ch_client.ClickHouseClient() as conn:
|
||||
query = conn.format(query=query, params=params)
|
||||
logging.debug("--------")
|
||||
logging.debug(query)
|
||||
logging.debug("--------")
|
||||
res = conn.execute(query=query)
|
||||
if res is None or sum([r.get("sessions") for r in res]) == 0:
|
||||
return []
|
||||
|
||||
table_hh1, table_hh2, columns, this_period_resources, last_period_resources = __get_two_values(res, time_index='hh',
|
||||
name_index='names')
|
||||
|
||||
logging.debug(f'TB1\n{table_hh1}')
|
||||
logging.debug(f'TB2\n{table_hh2}')
|
||||
del res
|
||||
|
||||
memory_idx = columns.index('memory_used')
|
||||
cpu_idx = columns.index('cpu_used')
|
||||
|
||||
mem_newvalue = _mean_table_index(table_hh1, memory_idx)
|
||||
mem_oldvalue = _mean_table_index(table_hh2, memory_idx)
|
||||
cpu_newvalue = _mean_table_index(table_hh1, cpu_idx)
|
||||
cpu_oldvalue = _mean_table_index(table_hh2, cpu_idx)
|
||||
|
||||
cpu_ratio = 0
|
||||
mem_ratio = 0
|
||||
if mem_newvalue == 0:
|
||||
mem_newvalue = None
|
||||
mem_ratio = None
|
||||
if mem_oldvalue == 0:
|
||||
mem_oldvalue = None
|
||||
mem_ratio = None
|
||||
if cpu_newvalue == 0:
|
||||
cpu_newvalue = None
|
||||
cpu_ratio = None
|
||||
if cpu_oldvalue == 0:
|
||||
cpu_oldvalue = None
|
||||
cpu_ratio = None
|
||||
|
||||
output = list()
|
||||
if cpu_oldvalue is not None or cpu_newvalue is not None:
|
||||
output.append({'category': schemas.InsightCategories.RESOURCES,
|
||||
'name': 'cpu',
|
||||
'value': cpu_newvalue,
|
||||
'oldValue': cpu_oldvalue,
|
||||
'change': 100 * (
|
||||
cpu_newvalue - cpu_oldvalue) / cpu_oldvalue if cpu_ratio is not None else cpu_ratio,
|
||||
'isNew': True if cpu_newvalue is not None and cpu_oldvalue is None else False})
|
||||
if mem_oldvalue is not None or mem_newvalue is not None:
|
||||
output.append({'category': schemas.InsightCategories.RESOURCES,
|
||||
'name': 'memory',
|
||||
'value': mem_newvalue,
|
||||
'oldValue': mem_oldvalue,
|
||||
'change': 100 * (
|
||||
mem_newvalue - mem_oldvalue) / mem_oldvalue if mem_ratio is not None else mem_ratio,
|
||||
'isNew': True if mem_newvalue is not None and mem_oldvalue is None else False})
|
||||
return output
|
||||
|
||||
|
||||
def query_click_rage_by_period(project_id, start_time, end_time,
|
||||
filters: Optional[schemas.SessionsSearchPayloadSchema]):
|
||||
params = {
|
||||
"project_id": project_id, "startTimestamp": start_time, "endTimestamp": end_time,
|
||||
"step_size": metrics.__get_step_size(endTimestamp=end_time, startTimestamp=start_time, density=3)}
|
||||
params, sub_query = __filter_subquery(project_id=project_id, filters=filters, params=params)
|
||||
conditions = ["issue_type = 'click_rage'", "event_type = 'ISSUE'"]
|
||||
query = f"""WITH toUInt32(toStartOfInterval(toDateTime(%(startTimestamp)s/1000), INTERVAL %(step_size)s second)) AS start,
|
||||
toUInt32(toStartOfInterval(toDateTime(%(endTimestamp)s/1000), INTERVAL %(step_size)s second)) AS end
|
||||
SELECT T1.hh, countIf(T2.session_id != 0) as sessions, groupUniqArray(T2.url_host) as names, T2.url_path as sources
|
||||
FROM (SELECT arrayJoin(arrayMap(x -> toDateTime(x), range(start, end, %(step_size)s))) as hh) AS T1
|
||||
LEFT JOIN (SELECT session_id, url_host, url_path, toStartOfInterval(datetime, INTERVAL %(step_size)s second ) as dtime
|
||||
FROM experimental.events
|
||||
{sub_query}
|
||||
WHERE project_id = %(project_id)s
|
||||
AND datetime >= toDateTime(%(startTimestamp)s/1000)
|
||||
AND datetime < toDateTime(%(endTimestamp)s/1000)
|
||||
AND {" AND ".join(conditions)}) AS T2 ON T2.dtime = T1.hh
|
||||
GROUP BY T1.hh, T2.url_path
|
||||
ORDER BY T1.hh DESC;"""
|
||||
with ch_client.ClickHouseClient() as conn:
|
||||
query = conn.format(query=query, params=params)
|
||||
logging.debug("--------")
|
||||
logging.debug(query)
|
||||
logging.debug("--------")
|
||||
res = conn.execute(query=query)
|
||||
if res is None or sum([r.get("sessions") for r in res]) == 0:
|
||||
return []
|
||||
|
||||
table_hh1, table_hh2, columns, this_period_rage, last_period_rage = __get_two_values(res, time_index='hh',
|
||||
name_index='sources')
|
||||
del res
|
||||
|
||||
new_names = [x for x in this_period_rage if x not in last_period_rage]
|
||||
common_names = [x for x in this_period_rage if x not in new_names]
|
||||
|
||||
sessions_idx = columns.index('sessions')
|
||||
names_idx = columns.index('sources')
|
||||
|
||||
# raged_increment = dict()
|
||||
raged_values = dict()
|
||||
new_raged_values = dict()
|
||||
# TODO verify line (188) _tmp = table_hh2[:, sessions_idx][n].sum()
|
||||
for n in common_names:
|
||||
if n is None:
|
||||
continue
|
||||
_oldvalue = _sum_table_index(_table_where(table_hh2, names_idx, n), sessions_idx)
|
||||
_newvalue = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
|
||||
# raged_increment[n] = (_newvalue - _oldvalue) / _oldvalue
|
||||
raged_values[n] = _newvalue, _oldvalue, (_newvalue - _oldvalue) / _oldvalue
|
||||
|
||||
for n in new_names:
|
||||
if n is None:
|
||||
continue
|
||||
_newvalue = _sum_table_index(_table_where(table_hh1, names_idx, n), sessions_idx)
|
||||
new_raged_values[n] = _newvalue
|
||||
|
||||
total = _sum_table_index(table_hh1, sessions_idx)
|
||||
names, ratio = _table_slice(table_hh1, names_idx), _table_slice(table_hh1, sessions_idx)
|
||||
ratio = sorted(zip(names, ratio), key=lambda k: k[1], reverse=True)
|
||||
increase = sorted(raged_values.items(), key=lambda k: k[1][-1], reverse=True)
|
||||
names_ = set([k[0] for k in increase[:3] + ratio[:3]] + new_names[:3])
|
||||
|
||||
results = list()
|
||||
for n in names_:
|
||||
if n is None:
|
||||
continue
|
||||
data_ = {'category': schemas.InsightCategories.RAGE, 'name': n,
|
||||
'value': None, 'oldValue': None, 'ratio': None, 'change': None, 'isNew': True}
|
||||
for n_, v in ratio:
|
||||
if n == n_:
|
||||
if n in new_names:
|
||||
data_['value'] = new_raged_values[n]
|
||||
data_['ratio'] = 100 * v / total
|
||||
break
|
||||
for n_, v in increase:
|
||||
if n == n_:
|
||||
data_['value'] = v[0]
|
||||
data_['oldValue'] = v[1]
|
||||
data_['change'] = 100 * v[2]
|
||||
data_['isNew'] = False
|
||||
break
|
||||
results.append(data_)
|
||||
return results
|
||||
|
||||
|
||||
def fetch_selected(project_id, data: schemas.GetInsightsSchema):
|
||||
output = list()
|
||||
if data.metricValue is None or len(data.metricValue) == 0:
|
||||
data.metricValue = []
|
||||
for v in schemas.InsightCategories:
|
||||
data.metricValue.append(v)
|
||||
filters = None
|
||||
if len(data.series) > 0:
|
||||
filters = data.series[0].filter
|
||||
|
||||
if schemas.InsightCategories.ERRORS in data.metricValue:
|
||||
output += query_most_errors_by_period(project_id=project_id, start_time=data.startTimestamp,
|
||||
end_time=data.endTimestamp, filters=filters)
|
||||
if schemas.InsightCategories.NETWORK in data.metricValue:
|
||||
output += query_requests_by_period(project_id=project_id, start_time=data.startTimestamp,
|
||||
end_time=data.endTimestamp, filters=filters)
|
||||
if schemas.InsightCategories.RAGE in data.metricValue:
|
||||
output += query_click_rage_by_period(project_id=project_id, start_time=data.startTimestamp,
|
||||
end_time=data.endTimestamp, filters=filters)
|
||||
if schemas.InsightCategories.RESOURCES in data.metricValue:
|
||||
output += query_cpu_memory_by_period(project_id=project_id, start_time=data.startTimestamp,
|
||||
end_time=data.endTimestamp, filters=filters)
|
||||
return output
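
# Hedged usage sketch: the project id is a placeholder; when metricValue is left
# empty, fetch_selected queries every InsightCategories value over the default
# 7-day window defined by GetInsightsSchema.
example_payload = schemas.GetInsightsSchema(
    metricValue=[schemas.InsightCategories.ERRORS, schemas.InsightCategories.RAGE]
)
example_insights = fetch_selected(project_id=1, data=example_payload)
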
|
||||
|
|
@ -199,6 +199,12 @@ def create_member(tenant_id, user_id, data: schemas.CreateMemberSchema, backgrou
|
|||
role_id = data.roleId
|
||||
if role_id is None:
|
||||
role_id = roles.get_role_by_name(tenant_id=tenant_id, name="member").get("roleId")
|
||||
else:
|
||||
role = roles.get_role(tenant_id=tenant_id, role_id=role_id)
|
||||
if role is None:
|
||||
return {"errors": ["role not found"]}
|
||||
if role["name"].lower() == "owner" and role["protected"]:
|
||||
return {"errors": ["invalid role"]}
|
||||
invitation_token = __generate_invitation_token()
|
||||
user = get_deleted_user_by_email(email=data.email)
|
||||
if user is not None and user["tenantId"] == tenant_id:
|
||||
|
|
@ -333,7 +339,7 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
|
|||
if editor_id != user_id_to_update:
|
||||
admin = get_user_role(tenant_id=tenant_id, user_id=editor_id)
|
||||
if not admin["superAdmin"] and not admin["admin"]:
|
||||
return {"errors": ["unauthorized"]}
|
||||
return {"errors": ["unauthorized, you must have admin privileges"]}
|
||||
if admin["admin"] and user["superAdmin"]:
|
||||
return {"errors": ["only the owner can edit his own details"]}
|
||||
else:
|
||||
|
|
@ -343,10 +349,10 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
|
|||
return {"errors": ["cannot change your own admin privileges"]}
|
||||
if changes.roleId:
|
||||
if user["superAdmin"] and changes.roleId != user["roleId"]:
|
||||
changes.roleId = None
|
||||
return {"errors": ["owner's role cannot be changed"]}
|
||||
|
||||
if changes.roleId != user["roleId"]:
|
||||
elif user["superAdmin"]:
|
||||
changes.roleId = None
|
||||
elif changes.roleId != user["roleId"]:
|
||||
return {"errors": ["cannot change your own role"]}
|
||||
|
||||
if changes.name and len(changes.name) > 0:
|
||||
|
|
@ -357,6 +363,12 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
|
|||
|
||||
if changes.roleId is not None:
|
||||
_changes["roleId"] = changes.roleId
|
||||
role = roles.get_role(tenant_id=tenant_id, role_id=changes.roleId)
|
||||
if role is None:
|
||||
return {"errors": ["role not found"]}
|
||||
else:
|
||||
if role["name"].lower() == "owner" and role["protected"]:
|
||||
return {"errors": ["invalid role"]}
|
||||
|
||||
if len(_changes.keys()) > 0:
|
||||
update(tenant_id=tenant_id, user_id=user_id_to_update, changes=_changes, output=False)
|
||||
|
|
|
|||
|
|
@ -136,13 +136,13 @@ def add_edit(tenant_id, data: schemas.WebhookSchema, replace_none=None):
|
|||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"name already exists.")
|
||||
if data.webhook_id is not None:
|
||||
return update(tenant_id=tenant_id, webhook_id=data.webhook_id,
|
||||
changes={"endpoint": data.endpoint.unicode_string(),
|
||||
changes={"endpoint": data.endpoint,
|
||||
"authHeader": data.auth_header,
|
||||
"name": data.name},
|
||||
replace_none=replace_none)
|
||||
else:
|
||||
return add(tenant_id=tenant_id,
|
||||
endpoint=data.endpoint.unicode_string(),
|
||||
endpoint=data.endpoint,
|
||||
auth_header=data.auth_header,
|
||||
name=data.name,
|
||||
replace_none=replace_none)
|
||||
|
|
|
|||
|
|
@ -15,6 +15,9 @@ rm -rf ./chalicelib/core/collaboration_base.py
|
|||
rm -rf ./chalicelib/core/collaboration_msteams.py
|
||||
rm -rf ./chalicelib/core/collaboration_slack.py
|
||||
rm -rf ./chalicelib/core/countries.py
|
||||
rm -rf ./chalicelib/core/metrics.py
|
||||
rm -rf ./chalicelib/core/metrics_ch.py
|
||||
rm -rf ./chalicelib/core/custom_metrics.py
|
||||
rm -rf ./chalicelib/core/custom_metrics_predefined.py
|
||||
rm -rf ./chalicelib/core/dashboards.py
|
||||
rm -rf ./chalicelib/core/errors_favorite.py
|
||||
|
|
@ -97,4 +100,8 @@ rm -rf ./chalicelib/core/db_request_handler.py
|
|||
rm -rf ./chalicelib/utils/or_cache
|
||||
rm -rf ./routers/subs/health.py
|
||||
rm -rf ./chalicelib/core/spot.py
|
||||
rm -rf ./chalicelib/core/unprocessed_sessions.py
|
||||
rm -rf ./chalicelib/core/unprocessed_sessions.py
|
||||
rm -rf ./chalicelib/core/product_anaytics2.py
|
||||
rm -rf ./chalicelib/utils/ch_client.py
|
||||
rm -rf ./chalicelib/utils/ch_client_exp.py
|
||||
rm -rf ./routers/subs/product_anaytics.py
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ captcha_server=
|
|||
CH_COMPRESSION=true
|
||||
ch_host=
|
||||
ch_port=
|
||||
ch_port_http=
|
||||
ch_receive_timeout=10
|
||||
ch_timeout=30
|
||||
change_password_link=/reset-password?invitation=%s&&pass=%s
|
||||
|
|
@ -83,4 +84,5 @@ SITE_URL=
|
|||
sourcemaps_bucket=sourcemaps
|
||||
sourcemaps_reader=http://sourcemapreader-openreplay.app.svc.cluster.local:9000/sourcemaps/{}/sourcemaps
|
||||
TRACE_PERIOD=300
|
||||
TZ=UTC
|
||||
TZ=UTC
|
||||
EXP_CH_DRIVER=true
|
||||
|
|
@ -1,21 +1,22 @@
|
|||
# Keep this version to not have conflicts between requests and boto3
|
||||
urllib3==1.26.16
|
||||
requests==2.32.3
|
||||
boto3==1.35.60
|
||||
pyjwt==2.9.0
|
||||
boto3==1.35.76
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.9
|
||||
elasticsearch==8.16.0
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.5
|
||||
uvicorn[standard]==0.32.0
|
||||
fastapi==0.115.6
|
||||
uvicorn[standard]==0.32.1
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.9.2
|
||||
apscheduler==3.10.4
|
||||
pydantic[email]==2.10.3
|
||||
apscheduler==3.11.0
|
||||
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
azure-storage-blob==12.23.1
|
||||
|
|
@ -1,21 +1,22 @@
|
|||
# Keep this version to not have conflicts between requests and boto3
|
||||
urllib3==1.26.16
|
||||
requests==2.32.3
|
||||
boto3==1.35.60
|
||||
pyjwt==2.9.0
|
||||
boto3==1.35.76
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.9
|
||||
elasticsearch==8.16.0
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.5
|
||||
fastapi==0.115.6
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.9.2
|
||||
apscheduler==3.10.4
|
||||
pydantic[email]==2.10.3
|
||||
apscheduler==3.11.0
|
||||
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
redis==5.2.0
|
||||
azure-storage-blob==12.23.1
|
||||
|
|
|
|||
|
|
@ -1,24 +1,25 @@
|
|||
# Keep this version to not have conflicts between requests and boto3
|
||||
urllib3==1.26.16
|
||||
requests==2.32.3
|
||||
boto3==1.35.60
|
||||
pyjwt==2.9.0
|
||||
boto3==1.35.76
|
||||
pyjwt==2.10.1
|
||||
psycopg2-binary==2.9.10
|
||||
psycopg[pool,binary]==3.2.3
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
clickhouse-connect==0.8.9
|
||||
elasticsearch==8.16.0
|
||||
jira==3.8.0
|
||||
cachetools==5.5.0
|
||||
|
||||
|
||||
|
||||
fastapi==0.115.5
|
||||
uvicorn[standard]==0.32.0
|
||||
fastapi==0.115.6
|
||||
uvicorn[standard]==0.32.1
|
||||
gunicorn==23.0.0
|
||||
python-decouple==3.8
|
||||
pydantic[email]==2.9.2
|
||||
apscheduler==3.10.4
|
||||
pydantic[email]==2.10.3
|
||||
apscheduler==3.11.0
|
||||
|
||||
clickhouse-driver[lz4]==0.2.9
|
||||
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
|
||||
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
|
||||
python3-saml==1.16.0
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
from typing import Optional
|
||||
|
||||
from chalicelib.core import roles, traces, assist_records, sessions
|
||||
from chalicelib.core import sessions_insights, assist_stats
|
||||
from chalicelib.core import assist_stats
|
||||
from chalicelib.core import unlock, signals
|
||||
from chalicelib.utils import assist_helper
|
||||
|
||||
|
|
@ -132,13 +132,6 @@ def send_interactions(projectId: int, data: schemas.SignalsSchema = Body(...),
|
|||
return {'data': data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboard/insights', tags=["insights"])
|
||||
@app.post('/{projectId}/dashboard/insights', tags=["insights"])
|
||||
def sessions_search(projectId: int, data: schemas.GetInsightsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {'data': sessions_insights.fetch_selected(data=data, project_id=projectId)}
|
||||
|
||||
|
||||
@public_app.get('/{project_id}/assist-stats/avg', tags=["assist-stats"])
|
||||
def get_assist_stats_avg(
|
||||
project_id: int,
|
||||
|
|
|
|||
|
|
@ -61,20 +61,6 @@ class SignalsSchema(BaseModel):
|
|||
data: dict = Field(default={})
|
||||
|
||||
|
||||
class InsightCategories(str, Enum):
|
||||
ERRORS = "errors"
|
||||
NETWORK = "network"
|
||||
RAGE = "rage"
|
||||
RESOURCES = "resources"
|
||||
|
||||
|
||||
class GetInsightsSchema(schemas._TimedSchema):
|
||||
startTimestamp: int = Field(default=TimeUTC.now(-7))
|
||||
endTimestamp: int = Field(default=TimeUTC.now())
|
||||
metricValue: List[InsightCategories] = Field(default=[])
|
||||
series: List[schemas.CardSeriesSchema] = Field(default=[])
|
||||
|
||||
|
||||
class CreateMemberSchema(schemas.CreateMemberSchema):
|
||||
roleId: Optional[int] = Field(default=None)
|
||||
|
||||
|
|
@ -150,15 +136,3 @@ class AssistRecordSearchPayloadSchema(schemas._PaginatedSchema, schemas._TimedSc
|
|||
user_id: Optional[int] = Field(default=None)
|
||||
query: Optional[str] = Field(default=None)
|
||||
order: Literal["asc", "desc"] = Field(default="desc")
|
||||
|
||||
|
||||
# TODO: move these to schema when Insights is supported on PG
|
||||
class CardInsights(schemas.CardInsights):
|
||||
metric_value: List[InsightCategories] = Field(default=[])
|
||||
|
||||
@model_validator(mode="after")
|
||||
def restrictions(self):
|
||||
return self
|
||||
|
||||
|
||||
CardSchema = ORUnion(Union[schemas.__cards_union_base, CardInsights], discriminator='metric_type')
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.22.0-ee';
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.21.0-ee';
|
||||
CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.22.0-ee';
|
||||
CREATE DATABASE IF NOT EXISTS experimental;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS experimental.autocomplete
|
||||
|
|
|
|||
32
ee/scripts/schema/db/init_dbs/postgresql/1.22.0/1.22.0.sql
Normal file
32
ee/scripts/schema/db/init_dbs/postgresql/1.22.0/1.22.0.sql
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
\set previous_version 'v1.21.0-ee'
|
||||
\set next_version 'v1.22.0-ee'
|
||||
SELECT openreplay_version() AS current_version,
|
||||
openreplay_version() = :'previous_version' AS valid_previous,
|
||||
openreplay_version() = :'next_version' AS is_next
|
||||
\gset
|
||||
|
||||
\if :valid_previous
|
||||
\echo valid previous DB version :'previous_version', starting DB upgrade to :'next_version'
|
||||
BEGIN;
|
||||
SELECT format($fn_def$
|
||||
CREATE OR REPLACE FUNCTION openreplay_version()
|
||||
RETURNS text AS
|
||||
$$
|
||||
SELECT '%1$s'
|
||||
$$ LANGUAGE sql IMMUTABLE;
|
||||
$fn_def$, :'next_version')
|
||||
\gexec
|
||||
|
||||
--
|
||||
|
||||
DELETE
|
||||
FROM public.metrics
|
||||
WHERE metrics.metric_type = 'insights';
|
||||
|
||||
COMMIT;
|
||||
|
||||
\elif :is_next
|
||||
\echo new version detected :'next_version', nothing to do
|
||||
\else
|
||||
\warn skipping DB upgrade of :'next_version', expected previous version :'previous_version', found :'current_version'
|
||||
\endif
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
\set or_version 'v1.21.0-ee'
|
||||
\set or_version 'v1.22.0-ee'
|
||||
SET client_min_messages TO NOTICE;
|
||||
\set ON_ERROR_STOP true
|
||||
SELECT EXISTS (SELECT 1
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
CREATE OR REPLACE FUNCTION openreplay_version AS() -> 'v1.21.0-ee';
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
\set previous_version 'v1.22.0-ee'
|
||||
\set next_version 'v1.21.0-ee'
|
||||
SELECT openreplay_version() AS current_version,
|
||||
openreplay_version() = :'previous_version' AS valid_previous,
|
||||
openreplay_version() = :'next_version' AS is_next
|
||||
\gset
|
||||
|
||||
\if :valid_previous
|
||||
\echo valid previous DB version :'previous_version', starting DB downgrade to :'next_version'
|
||||
BEGIN;
|
||||
SELECT format($fn_def$
|
||||
CREATE OR REPLACE FUNCTION openreplay_version()
|
||||
RETURNS text AS
|
||||
$$
|
||||
SELECT '%1$s'
|
||||
$$ LANGUAGE sql IMMUTABLE;
|
||||
$fn_def$, :'next_version')
|
||||
\gexec
|
||||
|
||||
COMMIT;
|
||||
|
||||
\elif :is_next
|
||||
\echo new version detected :'next_version', nothing to do
|
||||
\else
|
||||
\warn skipping DB downgrade of :'next_version', expected previous version :'previous_version', found :'current_version'
|
||||
\endif
|
||||
|
|
@ -72,7 +72,7 @@ function Player(props: IProps) {
|
|||
|
||||
React.useEffect(() => {
|
||||
playerContext.player.scale();
|
||||
}, [props.bottomBlock, props.fullscreen, playerContext.player, activeTab, fullView]);
|
||||
}, [bottomBlock, props.fullscreen, playerContext.player, activeTab, fullView]);
|
||||
|
||||
React.useEffect(() => {
|
||||
playerContext.player.addFullscreenBoundary(props.fullscreen || fullView);
|
||||
|
|
|
|||
|
|
@ -115,7 +115,8 @@ export const Resource = (resource: IResource) => ({
|
|||
|
||||
|
||||
export function getResourceFromResourceTiming(msg: ResourceTiming, sessStart: number) {
|
||||
const success = msg.duration > 0 // might be duration=0 when cached
|
||||
// duration might be duration=0 when cached
|
||||
const success = msg.duration > 0 || msg.encodedBodySize > 0 || msg.transferredSize > 0
|
||||
const type = getResourceType(msg.initiator, msg.url)
|
||||
return Resource({
|
||||
...msg,
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ export function validateIP(value) {
|
|||
|
||||
export function validateURL(value) {
|
||||
if (typeof value !== 'string') return false;
|
||||
const urlRegex = /^(http|https):\/\/(?:www\.)?[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,}(\/\S*)?$/i;
|
||||
const urlRegex = /^(http|https):\/\/(?:www\.)?[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,}(:\d+)?(\/\S*)?$/i;
|
||||
const ipRegex = /^(http|https):\/\/(?:localhost|(\d{1,3}\.){3}\d{1,3})(:\d+)?(\/\S*)?$/i;
|
||||
return urlRegex.test(value) || ipRegex.test(value);
|
||||
}
|
||||
|
|
@ -89,4 +89,4 @@ export const validatePassword = (password) => {
|
|||
const regex =
|
||||
/^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[!@#$%^&*()_+\-=[\]{};':"\\|,.<>/?])[A-Za-z\d!@#$%^&*()_+\-=[\]{};':"\\|,.<>/?]{8,}$/;
|
||||
return regex.test(password);
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ S3_SECRET="${COMMON_S3_SECRET}"
|
|||
SITE_URL="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"
|
||||
ch_host="clickhouse"
|
||||
ch_port="9000"
|
||||
ch_port_http="8123"
|
||||
ch_username="default"
|
||||
js_cache_bucket=sessions-assets
|
||||
jwt_secret="${COMMON_JWT_SECRET}"
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ S3_SECRET="${COMMON_S3_SECRET}"
|
|||
SITE_URL="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"
|
||||
ch_host="clickhouse"
|
||||
ch_port="9000"
|
||||
ch_port_http="8123"
|
||||
ch_username="default"
|
||||
js_cache_bucket=sessions-assets
|
||||
jwt_secret="${COMMON_JWT_SECRET}"
|
||||
|
|
|
|||
|
|
@ -55,6 +55,8 @@ spec:
  value: "{{ .Values.global.clickhouse.chHost }}"
- name: ch_port
  value: "{{ .Values.global.clickhouse.service.webPort }}"
- name: ch_port_http
  value: "{{ .Values.global.clickhouse.service.dataPort }}"
- name: ch_username
  value: '{{ .Values.global.clickhouse.username }}'
- name: ch_password

@ -54,6 +54,8 @@ spec:
  value: "{{ .Values.global.clickhouse.chHost }}"
- name: ch_port
  value: "{{ .Values.global.clickhouse.service.webPort }}"
- name: ch_port_http
  value: "{{ .Values.global.clickhouse.service.dataPort }}"
- name: sourcemaps_reader
  value: "http://sourcemapreader-openreplay.{{.Release.Namespace}}.{{.Values.global.clusterDomain}}:9000/%s/sourcemaps"
- name: ASSIST_URL

@ -89,6 +89,7 @@ apiCrons:
chalice:
  env:
    ch_port: 9000
    ch_port_http: 8123
    captcha_server: ''
    captcha_key: ''
    async_Token: ''

@ -213,6 +213,8 @@ spec:
  value: "{{.Values.global.clickhouse.chHost}}"
- name: CH_PORT
  value: "{{.Values.global.clickhouse.service.webPort}}"
- name: CH_PORT_HTTP
  value: "{{.Values.global.clickhouse.service.dataPort}}"
- name: CH_USERNAME
  value: "{{.Values.global.clickhouse.username}}"
- name: CH_PASSWORD

@ -458,6 +460,8 @@ spec:
  value: "{{.Values.global.clickhouse.chHost}}"
- name: CH_PORT
  value: "{{.Values.global.clickhouse.service.webPort}}"
- name: CH_PORT_HTTP
  value: "{{.Values.global.clickhouse.service.dataPort}}"
- name: CH_USERNAME
  value: "{{.Values.global.clickhouse.username}}"
- name: CH_PASSWORD

32
scripts/schema/db/init_dbs/postgresql/1.22.0/1.22.0.sql
Normal file
@ -0,0 +1,32 @@
\set previous_version 'v1.21.0'
\set next_version 'v1.22.0'
SELECT openreplay_version() AS current_version,
       openreplay_version() = :'previous_version' AS valid_previous,
       openreplay_version() = :'next_version' AS is_next
\gset

\if :valid_previous
\echo valid previous DB version :'previous_version', starting DB upgrade to :'next_version'
BEGIN;
SELECT format($fn_def$
CREATE OR REPLACE FUNCTION openreplay_version()
    RETURNS text AS
$$
SELECT '%1$s'
$$ LANGUAGE sql IMMUTABLE;
$fn_def$, :'next_version')
\gexec

--

DELETE
FROM public.metrics
WHERE metrics.metric_type = 'insights';

COMMIT;

\elif :is_next
\echo new version detected :'next_version', nothing to do
\else
\warn skipping DB upgrade of :'next_version', expected previous version :'previous_version', found :'current_version'
\endif

@ -1,4 +1,4 @@
\set or_version 'v1.21.0'
\set or_version 'v1.22.0'
SET client_min_messages TO NOTICE;
\set ON_ERROR_STOP true
SELECT EXISTS (SELECT 1

@ -91,7 +91,7 @@ dependencies {
    //noinspection GradleDynamicVersion
    implementation("com.facebook.react:react-native:0.20.1")
    implementation("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version")
    implementation("com.github.openreplay:android-tracker:v1.1.1")
    implementation("com.github.openreplay:android-tracker:v1.1.2")
}

//allprojects {

Binary file not shown.
@ -1,4 +1,12 @@
# 15.0.1
## 15.0.3

- fixing `failuresOnly` option for network

## 15.0.2

- fixing crossdomain access check

## 15.0.1

- update dependencies
- fix for cjs build process

@ -1,7 +1,7 @@
{
  "name": "@openreplay/tracker",
  "description": "The OpenReplay tracker main package",
  "version": "15.0.1",
  "version": "15.0.3",
  "keywords": [
    "logging",
    "replay"

@ -97,7 +97,7 @@ function processOptions(obj: any): obj is Options {

const canAccessTop = () => {
  try {
    return Boolean(window.top)
    return Boolean(window.top?.document)
  } catch {
    return false
  }

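A quick illustration of why the extra `.document` access matters: for a cross-origin parent, `window.top` is a restricted `WindowProxy` that is still truthy, while reading `window.top.document` throws a `SecurityError`, so only the stronger check trips the `try/catch`. A minimal standalone sketch (the helper name `isTopAccessible` is just for illustration):

```ts
// Sketch: detect whether the embedding (top) window is same-origin.
// window.top is truthy even for a cross-origin parent; reading a property
// such as .document is what actually triggers the SecurityError.
function isTopAccessible(): boolean {
  try {
    return Boolean(window.top?.document)
  } catch {
    return false
  }
}

// Usage: only reach into the parent frame when it is same-origin.
if (window.top !== window.self && isTopAccessible()) {
  // safe to read properties on window.top here
}
```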
@ -116,7 +116,7 @@ export default class API {
    }
    if (
      (window as any).__OPENREPLAY__ ||
      (!this.crossdomainMode && inIframe() && canAccessTop() && (window.top as any)?.__OPENREPLAY__)
      (!this.crossdomainMode && inIframe() && canAccessTop() && (window.top as any).__OPENREPLAY__)
    ) {
      console.error('OpenReplay: one tracker instance has been initialised already')
      return

@ -136,6 +136,9 @@ export default function (app: App, opts: Partial<Options> = {}) {
    setSessionTokenHeader,
    sanitize,
    (message) => {
      if (options.failuresOnly && message.status < 400) {
        return
      }
      app.send(
        NetworkRequest(
          message.requestType,

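For reference, a hedged usage sketch of the option this hunk fixes, assuming the public `@openreplay/tracker` constructor for the 15.x line exposes `network.failuresOnly` as documented (the project key is a placeholder):

```ts
import Tracker from '@openreplay/tracker'

const tracker = new Tracker({
  projectKey: 'YOUR_PROJECT_KEY', // placeholder, not a real key
  network: {
    failuresOnly: true,    // with the fix, requests with status < 400 are skipped
    capturePayload: false, // keep request/response bodies out of the recording
  },
})

tracker.start()
```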