Merge branch 'dev' into live-se-red
commit 6078872f0e
33 changed files with 636 additions and 320 deletions
@@ -4,9 +4,10 @@ from decouple import config
 logger = logging.getLogger(__name__)
 
-from . import errors as errors_legacy
+if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
+    logger.info(">>> Using experimental error search")
+    from . import errors as errors_legacy
+    from . import errors_ch as errors
+else:
+    from . import errors
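Note: with the hunk above, callers keep importing chalicelib.core.errors unchanged; the EXP_ERRORS_SEARCH variable (read through python-decouple's config) decides at import time whether the package-level name errors points at the legacy module or the ClickHouse-backed errors_ch. A minimal sketch of the same flag-switched import pattern; the module names backend_a/backend_b are hypothetical stand-ins, and the file must live inside a package for the relative imports to resolve:

    import logging

    from decouple import config  # reads environment variables / .env files

    logger = logging.getLogger(__name__)

    if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
        logger.info(">>> Using experimental error search")
        from . import backend_b as backend  # experimental implementation
    else:
        from . import backend_a as backend  # stable implementation

    # Callers are unaffected: `from mypackage import backend` keeps working;
    # only the module object bound to the name changes.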
@@ -1,19 +1,23 @@
 import json
 
 import schemas
-from chalicelib.core import sourcemaps
-from chalicelib.core.errors.modules import sessions
+from chalicelib.core.sourcemaps import sourcemaps
+from chalicelib.core.sessions import sessions_search
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
 from chalicelib.utils.metrics_helper import __get_step_size
+from typing import Optional, List, Union, Literal
 
 
-def get(error_id, family=False):
+def get(error_id, family=False) -> dict | List[dict]:
     if family:
         return get_batch([error_id])
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(
-            "SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
+            """SELECT *
+               FROM public.errors
+               WHERE error_id = %(error_id)s
+               LIMIT 1;""",
            {"error_id": error_id})
         cur.execute(query=query)
         result = cur.fetchone()
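Note: the queries above use psycopg2-style named placeholders (%(name)s) together with cursor.mogrify(), which returns the exact byte string execute() would send, with parameters safely quoted. A minimal sketch, assuming a plain psycopg2 connection rather than the project's pg_client wrapper (DSN and error_id are placeholders):

    import psycopg2

    conn = psycopg2.connect("dbname=test user=postgres")  # placeholder DSN
    with conn.cursor() as cur:
        query = cur.mogrify(
            """SELECT *
               FROM public.errors
               WHERE error_id = %(error_id)s
               LIMIT 1;""",
            {"error_id": "abc-123"})
        print(query)  # b"SELECT * ... WHERE error_id = 'abc-123' LIMIT 1;"
        cur.execute(query)  # execute() accepts the pre-rendered bytes
        row = cur.fetchone()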
@@ -47,9 +51,10 @@ def get_batch(error_ids):
     return helper.list_to_camel_case(errors)
 
 
-def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
-                            endTime_arg_name="endDate", chart=False, step_size_name="step_size",
-                            project_key="project_id"):
+def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
+                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
+                            chart: bool = False, step_size_name: str = "step_size",
+                            project_key: Optional[str] = "project_id"):
     if project_key is None:
         ch_sub_query = []
     else:
@@ -102,8 +107,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
         data.endTimestamp = TimeUTC.now(1)
     if len(data.events) > 0 or len(data.filters) > 0:
         print("-- searching for sessions before errors")
-        statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
-                                            error_status=data.status)
+        statuses = sessions_search.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
+                                                   error_status=data.status)
         if len(statuses) == 0:
             return empty_response
         error_ids = [e["errorId"] for e in statuses]
@@ -199,11 +204,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
         else:
             if len(statuses) == 0:
                 query = cur.mogrify(
-                    """SELECT error_id,
-                              COALESCE((SELECT TRUE
-                                        FROM public.user_viewed_errors AS ve
-                                        WHERE errors.error_id = ve.error_id
-                                          AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
+                    """SELECT error_id
                        FROM public.errors
                        WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
                     {"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
@@ -216,10 +217,6 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
 
         for r in rows:
             r.pop("full_count")
-            if r["error_id"] in statuses:
-                r["viewed"] = statuses[r["error_id"]]["viewed"]
-            else:
-                r["viewed"] = False
 
         return {
             'total': total,
@@ -314,41 +311,3 @@ def get_sessions(start_date, end_date, project_id, user_id, error_id):
         'total': total,
         'sessions': helper.list_to_camel_case(sessions_list)
     }
-
-
-ACTION_STATE = {
-    "unsolve": 'unresolved',
-    "solve": 'resolved',
-    "ignore": 'ignored'
-}
-
-
-def change_state(project_id, user_id, error_id, action):
-    errors = get(error_id, family=True)
-    print(len(errors))
-    status = ACTION_STATE.get(action)
-    if errors is None or len(errors) == 0:
-        return {"errors": ["error not found"]}
-    if errors[0]["status"] == status:
-        return {"errors": [f"error is already {status}"]}
-
-    if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
-        return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}
-
-    params = {
-        "userId": user_id,
-        "error_ids": tuple([e["errorId"] for e in errors]),
-        "status": status}
-    with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            """UPDATE public.errors
-               SET status = %(status)s
-               WHERE error_id IN %(error_ids)s
-               RETURNING status""",
-            params)
-        cur.execute(query=query)
-        row = cur.fetchone()
-        if row is not None:
-            for e in errors:
-                e["status"] = row["status"]
-    return {"data": errors}
@@ -62,8 +62,6 @@ def get_batch(error_ids):
     return errors_legacy.get_batch(error_ids=error_ids)
 
 
-
-
 def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
                             endTime_arg_name="endDate", type_condition=True, project_key="project_id", table_name=None):
     ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
@@ -341,7 +339,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
 
         main_ch_query = f"""\
                 SELECT details.error_id AS error_id,
-                       name, message, users, total, viewed,
+                       name, message, users, total,
                        sessions, last_occurrence, first_occurrence, chart
                 FROM (SELECT error_id,
                              name,
@@ -350,13 +348,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id):
                              COUNT(DISTINCT events.session_id) AS sessions,
                              MAX(datetime) AS max_datetime,
                              MIN(datetime) AS min_datetime,
-                             COUNT(DISTINCT events.error_id) OVER() AS total,
-                             any(isNotNull(viewed_error_id)) AS viewed
+                             COUNT(DISTINCT events.error_id) OVER() AS total
                       FROM {MAIN_EVENTS_TABLE} AS events
-                      LEFT JOIN (SELECT error_id AS viewed_error_id
-                                 FROM {exp_ch_helper.get_user_viewed_errors_table()}
-                                 WHERE project_id=%(project_id)s
-                                   AND user_id=%(userId)s) AS viewed_errors ON(events.error_id=viewed_errors.viewed_error_id)
                       INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
                                   FROM {MAIN_SESSIONS_TABLE} AS s
                       {subquery_part}
@@ -413,48 +406,3 @@ def get_sessions(start_date, end_date, project_id, user_id, error_id):
                                       project_id=project_id,
                                       user_id=user_id,
                                       error_id=error_id)
-
-
-def change_state(project_id, user_id, error_id, action):
-    return errors_legacy.change_state(project_id=project_id, user_id=user_id, error_id=error_id, action=action)
-
-
-MAX_RANK = 2
-
-
-def __status_rank(status):
-    return {
-        'unresolved': MAX_RANK - 2,
-        'ignored': MAX_RANK - 1,
-        'resolved': MAX_RANK
-    }.get(status)
-
-
-def merge(error_ids):
-    error_ids = list(set(error_ids))
-    errors = get_batch(error_ids)
-    if len(error_ids) <= 1 or len(error_ids) > len(errors):
-        return {"errors": ["invalid list of ids"]}
-    error_ids = [e["errorId"] for e in errors]
-    parent_error_id = error_ids[0]
-    status = "unresolved"
-    for e in errors:
-        if __status_rank(status) < __status_rank(e["status"]):
-            status = e["status"]
-        if __status_rank(status) == MAX_RANK:
-            break
-    params = {
-        "error_ids": tuple(error_ids),
-        "parent_error_id": parent_error_id,
-        "status": status
-    }
-    with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            """UPDATE public.errors
-               SET parent_error_id = %(parent_error_id)s, status = %(status)s
-               WHERE error_id IN %(error_ids)s OR parent_error_id IN %(error_ids)s;""",
-            params)
-        cur.execute(query=query)
-        # row = cur.fetchone()
-
-    return {"data": "success"}
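Note: the removed merge() above resolves the merged status by rank, so the "highest" status among the merged errors wins (unresolved < ignored < resolved). A small self-contained sketch of that ordering, mirroring the removed __status_rank helper:

    MAX_RANK = 2

    def status_rank(status: str) -> int:
        return {"unresolved": MAX_RANK - 2,
                "ignored": MAX_RANK - 1,
                "resolved": MAX_RANK}[status]

    def merged_status(statuses: list[str]) -> str:
        # the status with the highest rank wins the merge
        return max(statuses, key=status_rank)

    assert merged_status(["unresolved", "resolved", "ignored"]) == "resolved"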
@@ -1,48 +0,0 @@
-from chalicelib.utils import pg_client
-
-
-def add_favorite_error(project_id, user_id, error_id):
-    with pg_client.PostgresClient() as cur:
-        cur.execute(
-            cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
-                            VALUES (%(userId)s,%(error_id)s);""",
-                        {"userId": user_id, "error_id": error_id})
-        )
-    return {"errorId": error_id, "favorite": True}
-
-
-def remove_favorite_error(project_id, user_id, error_id):
-    with pg_client.PostgresClient() as cur:
-        cur.execute(
-            cur.mogrify(f"""DELETE FROM public.user_favorite_errors
-                            WHERE
-                                user_id = %(userId)s
-                                AND error_id = %(error_id)s;""",
-                        {"userId": user_id, "error_id": error_id})
-        )
-    return {"errorId": error_id, "favorite": False}
-
-
-def favorite_error(project_id, user_id, error_id):
-    exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
-    if not exists:
-        return {"errors": ["cannot bookmark non-rehydrated errors"]}
-    if favorite:
-        return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
-    return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
-
-
-def error_exists_and_favorite(user_id, error_id):
-    with pg_client.PostgresClient() as cur:
-        cur.execute(
-            cur.mogrify(
-                """SELECT errors.error_id AS exists, ufe.error_id AS favorite
-                   FROM public.errors
-                   LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
-                   WHERE error_id = %(error_id)s;""",
-                {"userId": user_id, "error_id": error_id})
-        )
-        r = cur.fetchone()
-        if r is None:
-            return False, False
-        return True, r.get("favorite") is not None
@@ -1,37 +0,0 @@
-from chalicelib.utils import pg_client
-
-
-def add_viewed_error(project_id, user_id, error_id):
-    with pg_client.PostgresClient() as cur:
-        cur.execute(
-            cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
-                           VALUES (%(userId)s,%(error_id)s);""",
-                        {"userId": user_id, "error_id": error_id})
-        )
-
-
-def viewed_error_exists(user_id, error_id):
-    with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            """SELECT
-                   errors.error_id AS hydrated,
-                   COALESCE((SELECT TRUE
-                             FROM public.user_viewed_errors AS ve
-                             WHERE ve.error_id = %(error_id)s
-                               AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
-               FROM public.errors
-               WHERE error_id = %(error_id)s""",
-            {"userId": user_id, "error_id": error_id})
-        cur.execute(
-            query=query
-        )
-        r = cur.fetchone()
-        if r:
-            return r.get("viewed")
-        return True
-
-
-def viewed_error(project_id, user_id, error_id):
-    if viewed_error_exists(user_id=user_id, error_id=error_id):
-        return None
-    return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
@@ -403,7 +403,9 @@ WITH sub_sessions AS (SELECT session_id {sub_sessions_extra_projection}
                            {"UNION ALL".join(projection_query)};"""
        params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
                  "endTimestamp": data.endTimestamp, "density": data.density,
-                 "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
+                 # This is ignored because UI will take care of it
+                 # "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
+                 "eventThresholdNumberInGroup": 8,
                  **extra_values}
        query = cur.mogrify(pg_query, params)
        _now = time()
@@ -362,7 +362,9 @@ def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
        _now = time()
        params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
                  "endTimestamp": data.endTimestamp, "density": data.density,
-                 "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
+                 # This is ignored because UI will take care of it
+                 # "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
+                 "eventThresholdNumberInGroup": 8,
                  **extra_values}
 
        ch_query1 = f"""\
@@ -10,7 +10,7 @@ import schemas
 from chalicelib.core import scope
 from chalicelib.core import assist, signup, feature_flags
 from chalicelib.core.metrics import heatmaps
-from chalicelib.core.errors import errors_favorite, errors_viewed, errors, errors_details
+from chalicelib.core.errors import errors, errors_details
 from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
     sessions_assignments, unprocessed_sessions, sessions_search
 from chalicelib.core import tenants, users, projects, license
@@ -329,13 +329,10 @@ def get_error_trace(projectId: int, sessionId: int, errorId: str,
 
 
 @app.get('/{projectId}/errors/{errorId}', tags=['errors'])
-def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
-                       density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
+def errors_get_details(projectId: int, errorId: str, density24: int = 24, density30: int = 30,
+                       context: schemas.CurrentContext = Depends(OR_context)):
     data = errors_details.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                       **{"density24": density24, "density30": density30})
-    if data.get("data") is not None:
-        background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
-                                  error_id=errorId)
     return data
 
 
@@ -350,22 +347,15 @@ def errors_get_details_sourcemaps(projectId: int, errorId: str,
     }
 
 
-@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"])
-def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
-                              endDate: int = TimeUTC.now(),
-                              context: schemas.CurrentContext = Depends(OR_context)):
-    if action == "favorite":
-        return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
-    elif action == "sessions":
-        start_date = startDate
-        end_date = endDate
-        return {
-            "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
-                                        start_date=start_date, end_date=end_date)}
-    elif action in list(errors.ACTION_STATE.keys()):
-        return errors.change_state(project_id=projectId, user_id=context.user_id, error_id=errorId, action=action)
-    else:
-        return {"errors": ["undefined action"]}
+@app.get('/{projectId}/errors/{errorId}/sessions', tags=["errors"])
+def get_errors_sessions(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
+                        endDate: int = TimeUTC.now(),
+                        context: schemas.CurrentContext = Depends(OR_context)):
+    start_date = startDate
+    end_date = endDate
+    return {
+        "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
+                                    start_date=start_date, end_date=end_date)}
 
 
 @app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"])
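Note: the hunk above replaces a catch-all /{errorId}/{action} route, which dispatched on the action string inside the handler, with a single explicit /{errorId}/sessions route; the favorite and state-change actions disappear along with the dropped favorites feature. A minimal FastAPI sketch of the difference (paths are illustrative only):

    from fastapi import FastAPI

    app = FastAPI()

    # Before (sketch): one route multiplexed several actions and had to
    # reject unknown ones in handler code.
    # @app.get('/errors/{error_id}/{action}')
    # def error_action(error_id: str, action: str): ...

    # After (sketch): only the surviving action keeps a dedicated path,
    # so unknown actions now fail at the router with a plain 404.
    @app.get('/errors/{error_id}/sessions')
    def get_error_sessions(error_id: str):
        return {"errorId": error_id, "sessions": []}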
2 ee/api/.gitignore vendored
@@ -291,6 +291,4 @@ Pipfile.lock
 /chalicelib/core/errors/modules/*
 /chalicelib/core/errors/errors.py
 /chalicelib/core/errors/errors_ch.py
-/chalicelib/core/errors/errors_favorite.py
-/chalicelib/core/errors/errors_viewed.py
 /chalicelib/core/errors/errors_details.py
@@ -9,7 +9,7 @@ requests = "==2.32.3"
 boto3 = "==1.35.86"
 pyjwt = "==2.10.1"
 psycopg2-binary = "==2.9.10"
-psycopg = {extras = ["binary", "pool"], version = "==3.2.3"}
+psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
 clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
 clickhouse-connect = "==0.8.11"
 elasticsearch = "==8.17.0"
@@ -1,16 +0,0 @@
-import logging
-
-from chalicelib.core.errors.errors_viewed import *
-from chalicelib.utils import ch_client, exp_ch_helper
-
-_add_viewed_error = add_viewed_error
-logger = logging.getLogger(__name__)
-
-
-def add_viewed_error(project_id, user_id, error_id):
-    _add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
-    with ch_client.ClickHouseClient() as cur:
-        query = f"""INSERT INTO {exp_ch_helper.get_user_viewed_errors_table()}(project_id,user_id, error_id)
-                    VALUES (%(project_id)s,%(userId)s,%(error_id)s);"""
-        params = {"userId": user_id, "error_id": error_id, "project_id": project_id}
-        cur.execute(query=query, params=params)
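Note: the deleted EE module above used a wrap-and-extend pattern: star-import the CE implementation, keep a reference to the original function, then rebind the same name so the override also writes to ClickHouse. A minimal sketch of the pattern, with ce_module as a hypothetical stand-in for the CE implementation:

    from ce_module import *  # noqa: F401,F403 - re-export the CE API
    from ce_module import add_viewed_error as _add_viewed_error  # keep original

    def add_viewed_error(project_id, user_id, error_id):
        # Run the CE (PostgreSQL) write first...
        _add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
        # ...then mirror the record to the second store (ClickHouse in the
        # deleted module); a placeholder stands in for that write here.
        print(f"mirror viewed error {error_id} for user {user_id}")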
@@ -108,9 +108,7 @@ rm -rf ./chalicelib/core/alerts/alerts_processor.py
 rm -rf ./chalicelib/core/alerts/alerts_processor_ch.py
 rm -rf ./chalicelib/core/alerts/alerts_listener.py
 rm -rf ./chalicelib/core/alerts/modules/helpers.py
-rm -rf /chalicelib/core/errors/modules
-rm -rf /chalicelib/core/errors/errors.py
-rm -rf /chalicelib/core/errors/errors_ch.py
-rm -rf /chalicelib/core/errors/errors_favorite.py
-rm -rf /chalicelib/core/errors/errors_viewed.py
-rm -rf /chalicelib/core/errors/errors_details.py
+rm -rf ./chalicelib/core/errors/modules
+rm -rf ./chalicelib/core/errors/errors.py
+rm -rf ./chalicelib/core/errors/errors_ch.py
+rm -rf ./chalicelib/core/errors/errors_details.py
@@ -9,7 +9,7 @@ from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Re
 import schemas
 from chalicelib.core import scope
 from chalicelib.core import assist, signup, feature_flags
-from chalicelib.core.errors import errors, errors_viewed, errors_favorite
+from chalicelib.core.errors import errors
 from chalicelib.core.metrics import heatmaps
 from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_assignments, \
     sessions_viewed, unprocessed_sessions
@@ -349,13 +349,10 @@ def get_error_trace(projectId: int, sessionId: int, errorId: str,
 
 @app.get('/{projectId}/errors/{errorId}', tags=['errors'],
          dependencies=[OR_scope(Permissions.DEV_TOOLS, ServicePermissions.DEV_TOOLS)])
-def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
-                       density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
+def errors_get_details(projectId: int, errorId: str, density24: int = 24, density30: int = 30,
+                       context: schemas.CurrentContext = Depends(OR_context)):
     data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                               **{"density24": density24, "density30": density30})
-    if data.get("data") is not None:
-        background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
-                                  error_id=errorId)
     return data
 
 
@@ -371,22 +368,15 @@ def errors_get_details_sourcemaps(projectId: int, errorId: str,
     }
 
 
-@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"], dependencies=[OR_scope(Permissions.DEV_TOOLS)])
-def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
-                              endDate: int = TimeUTC.now(),
-                              context: schemas.CurrentContext = Depends(OR_context)):
-    if action == "favorite":
-        return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
-    elif action == "sessions":
-        start_date = startDate
-        end_date = endDate
-        return {
-            "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
-                                        start_date=start_date, end_date=end_date)}
-    elif action in list(errors.ACTION_STATE.keys()):
-        return errors.change_state(project_id=projectId, user_id=context.user_id, error_id=errorId, action=action)
-    else:
-        return {"errors": ["undefined action"]}
+@app.get('/{projectId}/errors/{errorId}/sessions', tags=["errors"], dependencies=[OR_scope(Permissions.DEV_TOOLS)])
+def get_errors_sessions(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
+                        endDate: int = TimeUTC.now(),
+                        context: schemas.CurrentContext = Depends(OR_context)):
+    start_date = startDate
+    end_date = endDate
+    return {
+        "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
+                                    start_date=start_date, end_date=end_date)}
 
 
 @app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"],
@@ -23,6 +23,9 @@ DELETE
 FROM public.metrics
 WHERE metrics.metric_type = 'insights';
 
+DROP TABLE IF EXISTS public.user_favorite_errors;
+DROP TABLE IF EXISTS public.user_viewed_errors;
+
 COMMIT;
 
 \elif :is_next
@@ -395,21 +395,6 @@ CREATE INDEX errors_project_id_error_id_integration_idx ON public.errors (projec
 CREATE INDEX errors_error_id_idx ON public.errors (error_id);
 CREATE INDEX errors_parent_error_id_idx ON public.errors (parent_error_id);
 
-CREATE TABLE public.user_favorite_errors
-(
-    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
-    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
-    PRIMARY KEY (user_id, error_id)
-);
-
-CREATE TABLE public.user_viewed_errors
-(
-    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
-    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
-    PRIMARY KEY (user_id, error_id)
-);
-CREATE INDEX user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id);
-CREATE INDEX user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id);
 
 CREATE TYPE device_type AS ENUM ('desktop', 'tablet', 'mobile', 'other');
 CREATE TYPE country AS ENUM ('UN', 'RW', 'SO', 'YE', 'IQ', 'SA', 'IR', 'CY', 'TZ', 'SY', 'AM', 'KE', 'CD', 'DJ', 'UG', 'CF', 'SC', 'JO', 'LB', 'KW', 'OM', 'QA', 'BH', 'AE', 'IL', 'TR', 'ET', 'ER', 'EG', 'SD', 'GR', 'BI', 'EE', 'LV', 'AZ', 'LT', 'SJ', 'GE', 'MD', 'BY', 'FI', 'AX', 'UA', 'MK', 'HU', 'BG', 'AL', 'PL', 'RO', 'XK', 'ZW', 'ZM', 'KM', 'MW', 'LS', 'BW', 'MU', 'SZ', 'RE', 'ZA', 'YT', 'MZ', 'MG', 'AF', 'PK', 'BD', 'TM', 'TJ', 'LK', 'BT', 'IN', 'MV', 'IO', 'NP', 'MM', 'UZ', 'KZ', 'KG', 'TF', 'HM', 'CC', 'PW', 'VN', 'TH', 'ID', 'LA', 'TW', 'PH', 'MY', 'CN', 'HK', 'BN', 'MO', 'KH', 'KR', 'JP', 'KP', 'SG', 'CK', 'TL', 'RU', 'MN', 'AU', 'CX', 'MH', 'FM', 'PG', 'SB', 'TV', 'NR', 'VU', 'NC', 'NF', 'NZ', 'FJ', 'LY', 'CM', 'SN', 'CG', 'PT', 'LR', 'CI', 'GH', 'GQ', 'NG', 'BF', 'TG', 'GW', 'MR', 'BJ', 'GA', 'SL', 'ST', 'GI', 'GM', 'GN', 'TD', 'NE', 'ML', 'EH', 'TN', 'ES', 'MA', 'MT', 'DZ', 'FO', 'DK', 'IS', 'GB', 'CH', 'SE', 'NL', 'AT', 'BE', 'DE', 'LU', 'IE', 'MC', 'FR', 'AD', 'LI', 'JE', 'IM', 'GG', 'SK', 'CZ', 'NO', 'VA', 'SM', 'IT', 'SI', 'ME', 'HR', 'BA', 'AO', 'NA', 'SH', 'BV', 'BB', 'CV', 'GY', 'GF', 'SR', 'PM', 'GL', 'PY', 'UY', 'BR', 'FK', 'GS', 'JM', 'DO', 'CU', 'MQ', 'BS', 'BM', 'AI', 'TT', 'KN', 'DM', 'AG', 'LC', 'TC', 'AW', 'VG', 'VC', 'MS', 'MF', 'BL', 'GP', 'GD', 'KY', 'BZ', 'SV', 'GT', 'HN', 'NI', 'CR', 'VE', 'EC', 'CO', 'PA', 'HT', 'AR', 'CL', 'BO', 'PE', 'MX', 'PF', 'PN', 'KI', 'TK', 'TO', 'WF', 'WS', 'NU', 'MP', 'GU', 'PR', 'VI', 'UM', 'AS', 'CA', 'US', 'PS', 'RS', 'AQ', 'SX', 'CW', 'BQ', 'SS','AC','AN','BU','CP','CS','CT','DD','DG','DY','EA','FQ','FX','HV','IC','JT','MI','NH','NQ','NT','PC','PU','PZ','RH','SU','TA','TP','VD','WK','YD','YU','ZR');
@@ -17,6 +17,22 @@ $$ LANGUAGE sql IMMUTABLE;
 $fn_def$, :'next_version')
 \gexec
 
+CREATE TABLE public.user_favorite_errors
+(
+    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
+    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
+    PRIMARY KEY (user_id, error_id)
+);
+
+CREATE TABLE public.user_viewed_errors
+(
+    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
+    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
+    PRIMARY KEY (user_id, error_id)
+);
+CREATE INDEX user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id);
+CREATE INDEX user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id);
+
 COMMIT;
 
 \elif :is_next
23 scripts/helmcharts/openreplay/charts/analytics/.helmignore Normal file
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
24 scripts/helmcharts/openreplay/charts/analytics/Chart.yaml Normal file
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: analytics
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.1
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.21.0"
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "analytics.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "analytics.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "analytics.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "analytics.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "analytics.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "analytics.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "analytics.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "analytics.labels" -}}
+helm.sh/chart: {{ include "analytics.chart" . }}
+{{ include "analytics.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "analytics.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "analytics.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "analytics.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "analytics.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
@@ -0,0 +1,126 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "analytics.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "analytics.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "analytics.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "analytics.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      shareProcessNamespace: true
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          {{- if .Values.global.enterpriseEditionLicense }}
+          image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
+          {{- else }}
+          image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if .Values.healthCheck}}
+          {{- .Values.healthCheck | toYaml | nindent 10}}
+          {{- end}}
+          env:
+            {{- range $key, $val := .Values.env }}
+            - name: {{ $key }}
+              value: '{{ $val }}'
+            {{- end}}
+            {{- range $key, $val := .Values.global.env }}
+            - name: {{ $key }}
+              value: '{{ $val }}'
+            {{- end }}
+            - name: LICENSE_KEY
+              value: '{{ .Values.global.enterpriseEditionLicense }}'
+            - name: KAFKA_SERVERS
+              value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
+            - name: KAFKA_USE_SSL
+              value: '{{ .Values.global.kafka.kafkaUseSsl }}'
+            - name: JWT_SECRET
+              value: {{ .Values.global.jwtSecret }}
+            - name: CH_USERNAME
+              value: '{{ .Values.global.clickhouse.username }}'
+            - name: CH_PASSWORD
+              value: '{{ .Values.global.clickhouse.password }}'
+            - name: CLICKHOUSE_STRING
+              value: '{{ .Values.global.clickhouse.chHost }}:{{.Values.global.clickhouse.service.webPort}}/{{.Values.env.ch_db}}'
+            - name: pg_password
+              {{- if .Values.global.postgresql.existingSecret }}
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.global.postgresql.existingSecret }}
+                  key: postgresql-postgres-password
+              {{- else }}
+              value: '{{ .Values.global.postgresql.postgresqlPassword }}'
+              {{- end}}
+            - name: POSTGRES_STRING
+              value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}'
+            {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
+          ports:
+            {{- range $key, $val := .Values.service.ports }}
+            - name: {{ $key }}
+              containerPort: {{ $val }}
+              protocol: TCP
+            {{- end }}
+          volumeMounts:
+            - name: datadir
+              mountPath: /mnt/efs
+            {{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
+            {{- with .Values.persistence.mounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      {{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
+      volumes:
+        {{- with .Values.persistence.volumes }}
+        {{- toYaml . | nindent 6 }}
+        {{- end }}
+        - name: datadir
+          hostPath:
+            # Ensure the file directory is created.
+            path: {{ tpl .Values.pvc.hostMountPath . }}
+            type: DirectoryOrCreate
+      {{- else }}
+      volumes:
+        {{- with .Values.persistence.volumes }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        - name: datadir
+          persistentVolumeClaim:
+            claimName: "{{ tpl .Values.pvc.name . }}"
+      {{- end }}
+      {{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
@@ -0,0 +1,29 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "analytics.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "analytics.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
@@ -0,0 +1,36 @@
+{{- if .Values.ingress.enabled }}
+{{- $fullName := include "analytics.fullname" . -}}
+{{- $svcPort := .Values.service.ports.http -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+  annotations:
+    {{- with .Values.ingress.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    nginx.ingress.kubernetes.io/upstream-hash-by: $http_x_forwarded_for
+spec:
+  ingressClassName: "{{ tpl .Values.ingress.className . }}"
+  tls:
+    - hosts:
+        - {{ .Values.global.domainName }}
+      {{- if .Values.ingress.tls.secretName}}
+      secretName: {{ .Values.ingress.tls.secretName }}
+      {{- end}}
+  rules:
+    - host: {{ .Values.global.domainName }}
+      http:
+        paths:
+          - pathType: Prefix
+            backend:
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+            path: /analytics/(.*)
+{{- end }}
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "analytics.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{- range $key, $val := .Values.service.ports }}
+    - port: {{ $val }}
+      targetPort: {{ $key }}
+      protocol: TCP
+      name: {{ $key }}
+    {{- end}}
+  selector:
+    {{- include "analytics.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,18 @@
+{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "analytics.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+    {{- if .Values.serviceMonitor.additionalLabels }}
+    {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+    {{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
+  selector:
+    matchLabels:
+      {{- include "analytics.selectorLabels" . | nindent 6 }}
+{{- end }}
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "analytics.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "analytics.fullname" . }}-test-connection"
+  labels:
+    {{- include "analytics.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "analytics.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
131 scripts/helmcharts/openreplay/charts/analytics/values.yaml Normal file
@@ -0,0 +1,131 @@
+# Default values for openreplay.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+  repository: "{{ .Values.global.openReplayContainerRegistry }}/analytics"
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+
+imagePullSecrets: []
+nameOverride: "analytics"
+fullnameOverride: "analytics-openreplay"
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+podAnnotations: {}
+
+securityContext:
+  runAsUser: 1001
+  runAsGroup: 1001
+podSecurityContext:
+  runAsUser: 1001
+  runAsGroup: 1001
+  fsGroup: 1001
+  fsGroupChangePolicy: "OnRootMismatch"
+# podSecurityContext: {}
+#   fsGroup: 2000
+
+# securityContext: {}
+#   capabilities:
+#     drop:
+#     - ALL
+#   readOnlyRootFilesystem: true
+#   runAsNonRoot: true
+#   runAsUser: 1000
+
+service:
+  type: ClusterIP
+  ports:
+    http: 8080
+    metrics: 8888
+
+serviceMonitor:
+  enabled: true
+  additionalLabels:
+    release: observability
+  scrapeConfigs:
+    - port: metrics
+      honorLabels: true
+      interval: 15s
+      path: /metrics
+      scheme: http
+      scrapeTimeout: 10s
+
+ingress:
+  enabled: true
+  className: "{{ .Values.global.ingress.controller.ingressClassResource.name }}"
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "120"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
+    nginx.ingress.kubernetes.io/cors-allow-methods: POST,GET,PATCH,DELETE
+    nginx.ingress.kubernetes.io/cors-allow-headers: Content-Type,Authorization,Content-Encoding,X-Openreplay-Batch
+    nginx.ingress.kubernetes.io/cors-allow-origin: '*'
+    nginx.ingress.kubernetes.io/enable-cors: "true"
+    nginx.ingress.kubernetes.io/cors-expose-headers: "Content-Length"
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  tls:
+    secretName: openreplay-ssl
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 5
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+env:
+  TOKEN_SECRET: secret_token_string # TODO: generate on build
+
+
+pvc:
+  # This can be either persistentVolumeClaim or hostPath.
+  # In case of pvc, you'll have to provide the pvc name.
+  # For example
+  # name: openreplay-efs
+  name: "{{ .Values.global.pvcRWXName }}"
+  hostMountPath: "{{ .Values.global.orTmpDir }}"
+
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+
+persistence: {}
+#  # Spec of spec.template.spec.containers[*].volumeMounts
+#  mounts:
+#    - name: kafka-ssl
+#      mountPath: /opt/kafka/ssl
+#  # Spec of spec.template.spec.volumes
+#  volumes:
+#    - name: kafka-ssl
+#      secret:
+#        secretName: kafka-ssl
@@ -193,14 +193,3 @@ CREATE TABLE IF NOT EXISTS experimental.ios_events
       PARTITION BY toYYYYMM(datetime)
       ORDER BY (project_id, datetime, event_type, session_id, message_id)
       TTL datetime + INTERVAL 1 MONTH;
-
-CREATE TABLE IF NOT EXISTS experimental.user_viewed_errors
-(
-    project_id UInt16,
-    user_id    UInt32,
-    error_id   String,
-    _timestamp DateTime DEFAULT now()
-) ENGINE = ReplacingMergeTree(_timestamp)
-      PARTITION BY toYYYYMM(_timestamp)
-      ORDER BY (project_id, user_id, error_id)
-      TTL _timestamp + INTERVAL 3 MONTH;
@@ -193,14 +193,3 @@ CREATE TABLE IF NOT EXISTS experimental.ios_events
      PARTITION BY toYYYYMM(datetime)
      ORDER BY (project_id, datetime, event_type, session_id, message_id)
      TTL datetime + INTERVAL 1 MONTH;
-
-CREATE TABLE IF NOT EXISTS experimental.user_viewed_errors
-(
-    project_id UInt16,
-    user_id    UInt32,
-    error_id   String,
-    _timestamp DateTime DEFAULT now()
-) ENGINE = ReplacingMergeTree(_timestamp)
-      PARTITION BY toYYYYMM(_timestamp)
-      ORDER BY (project_id, user_id, error_id)
-      TTL _timestamp + INTERVAL 3 MONTH;
@@ -23,6 +23,10 @@ DELETE
 FROM public.metrics
 WHERE metrics.metric_type = 'insights';
 
+DROP TABLE IF EXISTS public.user_favorite_errors;
+DROP TABLE IF EXISTS public.user_viewed_errors;
+
+
 COMMIT;
 
 \elif :is_next
@@ -357,22 +357,6 @@ CREATE INDEX errors_project_id_error_id_integration_idx ON public.errors (projec
 CREATE INDEX errors_error_id_idx ON public.errors (error_id);
 CREATE INDEX errors_parent_error_id_idx ON public.errors (parent_error_id);
 
-CREATE TABLE public.user_favorite_errors
-(
-    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
-    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
-    PRIMARY KEY (user_id, error_id)
-);
-
-CREATE TABLE public.user_viewed_errors
-(
-    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
-    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
-    PRIMARY KEY (user_id, error_id)
-);
-CREATE INDEX user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id);
-CREATE INDEX user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id);
-
 CREATE TYPE device_type AS ENUM ('desktop', 'tablet', 'mobile', 'other');
 CREATE TYPE country AS ENUM ('UN', 'RW', 'SO', 'YE', 'IQ', 'SA', 'IR', 'CY', 'TZ', 'SY', 'AM', 'KE', 'CD', 'DJ', 'UG', 'CF', 'SC', 'JO', 'LB', 'KW', 'OM', 'QA', 'BH', 'AE', 'IL', 'TR', 'ET', 'ER', 'EG', 'SD', 'GR', 'BI', 'EE', 'LV', 'AZ', 'LT', 'SJ', 'GE', 'MD', 'BY', 'FI', 'AX', 'UA', 'MK', 'HU', 'BG', 'AL', 'PL', 'RO', 'XK', 'ZW', 'ZM', 'KM', 'MW', 'LS', 'BW', 'MU', 'SZ', 'RE', 'ZA', 'YT', 'MZ', 'MG', 'AF', 'PK', 'BD', 'TM', 'TJ', 'LK', 'BT', 'IN', 'MV', 'IO', 'NP', 'MM', 'UZ', 'KZ', 'KG', 'TF', 'HM', 'CC', 'PW', 'VN', 'TH', 'ID', 'LA', 'TW', 'PH', 'MY', 'CN', 'HK', 'BN', 'MO', 'KH', 'KR', 'JP', 'KP', 'SG', 'CK', 'TL', 'RU', 'MN', 'AU', 'CX', 'MH', 'FM', 'PG', 'SB', 'TV', 'NR', 'VU', 'NC', 'NF', 'NZ', 'FJ', 'LY', 'CM', 'SN', 'CG', 'PT', 'LR', 'CI', 'GH', 'GQ', 'NG', 'BF', 'TG', 'GW', 'MR', 'BJ', 'GA', 'SL', 'ST', 'GI', 'GM', 'GN', 'TD', 'NE', 'ML', 'EH', 'TN', 'ES', 'MA', 'MT', 'DZ', 'FO', 'DK', 'IS', 'GB', 'CH', 'SE', 'NL', 'AT', 'BE', 'DE', 'LU', 'IE', 'MC', 'FR', 'AD', 'LI', 'JE', 'IM', 'GG', 'SK', 'CZ', 'NO', 'VA', 'SM', 'IT', 'SI', 'ME', 'HR', 'BA', 'AO', 'NA', 'SH', 'BV', 'BB', 'CV', 'GY', 'GF', 'SR', 'PM', 'GL', 'PY', 'UY', 'BR', 'FK', 'GS', 'JM', 'DO', 'CU', 'MQ', 'BS', 'BM', 'AI', 'TT', 'KN', 'DM', 'AG', 'LC', 'TC', 'AW', 'VG', 'VC', 'MS', 'MF', 'BL', 'GP', 'GD', 'KY', 'BZ', 'SV', 'GT', 'HN', 'NI', 'CR', 'VE', 'EC', 'CO', 'PA', 'HT', 'AR', 'CL', 'BO', 'PE', 'MX', 'PF', 'PN', 'KI', 'TK', 'TO', 'WF', 'WS', 'NU', 'MP', 'GU', 'PR', 'VI', 'UM', 'AS', 'CA', 'US', 'PS', 'RS', 'AQ', 'SX', 'CW', 'BQ', 'SS','AC','AN','BU','CP','CS','CT','DD','DG','DY','EA','FQ','FX','HV','IC','JT','MI','NH','NQ','NT','PC','PU','PZ','RH','SU','TA','TP','VD','WK','YD','YU','ZR');
42 scripts/schema/db/rollback_dbs/postgresql/1.22.0/1.22.0.sql Normal file
@@ -0,0 +1,42 @@
+\set previous_version 'v1.22.0'
+\set next_version 'v1.21.0'
+SELECT openreplay_version() AS current_version,
+       openreplay_version() = :'previous_version' AS valid_previous,
+       openreplay_version() = :'next_version' AS is_next
+\gset
+
+\if :valid_previous
+\echo valid previous DB version :'previous_version', starting DB downgrade to :'next_version'
+BEGIN;
+SELECT format($fn_def$
+CREATE OR REPLACE FUNCTION openreplay_version()
+    RETURNS text AS
+$$
+SELECT '%1$s'
+$$ LANGUAGE sql IMMUTABLE;
+$fn_def$, :'next_version')
+\gexec
+
+CREATE TABLE public.user_favorite_errors
+(
+    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
+    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
+    PRIMARY KEY (user_id, error_id)
+);
+
+CREATE TABLE public.user_viewed_errors
+(
+    user_id  integer NOT NULL REFERENCES public.users (user_id) ON DELETE CASCADE,
+    error_id text    NOT NULL REFERENCES public.errors (error_id) ON DELETE CASCADE,
+    PRIMARY KEY (user_id, error_id)
+);
+CREATE INDEX user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id);
+CREATE INDEX user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id);
+
+COMMIT;
+
+\elif :is_next
+\echo new version detected :'next_version', nothing to do
+\else
+\warn skipping DB downgrade of :'next_version', expected previous version :'previous_version', found :'current_version'
+\endif